drm/msm: Drop mm_lock in scan loop
[sfrench/cifs-2.6.git] / drivers / gpu / drm / msm / msm_gem_shrinker.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6
7 #include "msm_drv.h"
8 #include "msm_gem.h"
9 #include "msm_gpu.h"
10 #include "msm_gpu_trace.h"
11
12 static unsigned long
13 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
14 {
15         struct msm_drm_private *priv =
16                 container_of(shrinker, struct msm_drm_private, shrinker);
17         return priv->shrinkable_count;
18 }
19
/*
 * Core-shrinker callback: walk the purgeable (inactive_dontneed) list and
 * purge objects' backing pages until sc->nr_to_scan pages have been freed
 * or the list is exhausted.  Returns pages freed, or SHRINK_STOP if no
 * progress was made.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct list_head still_in_list;
	unsigned long freed = 0;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < sc->nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				&priv->inactive_dontneed, typeof(*msm_obj), mm_list);

		if (!msm_obj)
			break;

		/*
		 * Park the object on a private list so this loop makes
		 * forward progress while mm_lock is dropped below; anything
		 * we did not purge is spliced back at the end.
		 */
		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgable)
		 */

		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (is_purgeable(msm_obj)) {
			/*
			 * This will move the obj out of still_in_list to
			 * the purged list
			 */
			msm_gem_purge(&msm_obj->base);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
		msm_gem_unlock(&msm_obj->base);

tail:
		/* Drop the temporary ref and retake mm_lock for the next pass */
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	/* Return everything we skipped (or failed to lock) to the dontneed list */
	list_splice_tail(&still_in_list, &priv->inactive_dontneed);
	mutex_unlock(&priv->mm_lock);

	if (freed > 0) {
		trace_msm_gem_purge(freed << PAGE_SHIFT);
	} else {
		/* No progress: tell the core to stop calling us for this pass */
		return SHRINK_STOP;
	}

	return freed;
}
91
92 /* since we don't know any better, lets bail after a few
93  * and if necessary the shrinker will be invoked again.
94  * Seems better than unmapping *everything*
95  */
96 static const int vmap_shrink_limit = 15;
97
98 static unsigned
99 vmap_shrink(struct list_head *mm_list)
100 {
101         struct msm_gem_object *msm_obj;
102         unsigned unmapped = 0;
103
104         list_for_each_entry(msm_obj, mm_list, mm_list) {
105                 /* Use trylock, because we cannot block on a obj that
106                  * might be trying to acquire mm_lock
107                  */
108                 if (!msm_gem_trylock(&msm_obj->base))
109                         continue;
110                 if (is_vunmapable(msm_obj)) {
111                         msm_gem_vunmap(&msm_obj->base);
112                         unmapped++;
113                 }
114                 msm_gem_unlock(&msm_obj->base);
115
116                 if (++unmapped >= vmap_shrink_limit)
117                         break;
118         }
119
120         return unmapped;
121 }
122
123 static int
124 msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
125 {
126         struct msm_drm_private *priv =
127                 container_of(nb, struct msm_drm_private, vmap_notifier);
128         struct list_head *mm_lists[] = {
129                 &priv->inactive_dontneed,
130                 &priv->inactive_willneed,
131                 priv->gpu ? &priv->gpu->active_list : NULL,
132                 NULL,
133         };
134         unsigned idx, unmapped = 0;
135
136         mutex_lock(&priv->mm_lock);
137
138         for (idx = 0; mm_lists[idx]; idx++) {
139                 unmapped += vmap_shrink(mm_lists[idx]);
140
141                 if (unmapped >= vmap_shrink_limit)
142                         break;
143         }
144
145         mutex_unlock(&priv->mm_lock);
146
147         *(unsigned long *)ptr += unmapped;
148
149         if (unmapped > 0)
150                 trace_msm_gem_purge_vmaps(unmapped);
151
152         return NOTIFY_DONE;
153 }
154
155 /**
156  * msm_gem_shrinker_init - Initialize msm shrinker
157  * @dev: drm device
158  *
159  * This function registers and sets up the msm shrinker.
160  */
161 void msm_gem_shrinker_init(struct drm_device *dev)
162 {
163         struct msm_drm_private *priv = dev->dev_private;
164         priv->shrinker.count_objects = msm_gem_shrinker_count;
165         priv->shrinker.scan_objects = msm_gem_shrinker_scan;
166         priv->shrinker.seeks = DEFAULT_SEEKS;
167         WARN_ON(register_shrinker(&priv->shrinker));
168
169         priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
170         WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
171 }
172
173 /**
174  * msm_gem_shrinker_cleanup - Clean up msm shrinker
175  * @dev: drm device
176  *
177  * This function unregisters the msm shrinker.
178  */
179 void msm_gem_shrinker_cleanup(struct drm_device *dev)
180 {
181         struct msm_drm_private *priv = dev->dev_private;
182
183         if (priv->shrinker.nr_deferred) {
184                 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
185                 unregister_shrinker(&priv->shrinker);
186         }
187 }