1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
10 #include "msm_gpu_trace.h"
13 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
15 struct msm_drm_private *priv =
16 container_of(shrinker, struct msm_drm_private, shrinker);
17 return priv->shrinkable_count;
21 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
23 struct msm_drm_private *priv =
24 container_of(shrinker, struct msm_drm_private, shrinker);
25 struct msm_gem_object *msm_obj;
26 unsigned long freed = 0;
28 mutex_lock(&priv->mm_lock);
30 list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
31 if (freed >= sc->nr_to_scan)
33 /* Use trylock, because we cannot block on a obj that
34 * might be trying to acquire mm_lock
36 if (!msm_gem_trylock(&msm_obj->base))
38 if (is_purgeable(msm_obj)) {
39 msm_gem_purge(&msm_obj->base);
40 freed += msm_obj->base.size >> PAGE_SHIFT;
42 msm_gem_unlock(&msm_obj->base);
45 mutex_unlock(&priv->mm_lock);
48 trace_msm_gem_purge(freed << PAGE_SHIFT);
/* since we don't know any better, lets bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*
 */
static const int vmap_shrink_limit = 15;
63 vmap_shrink(struct list_head *mm_list)
65 struct msm_gem_object *msm_obj;
66 unsigned unmapped = 0;
68 list_for_each_entry(msm_obj, mm_list, mm_list) {
69 /* Use trylock, because we cannot block on a obj that
70 * might be trying to acquire mm_lock
72 if (!msm_gem_trylock(&msm_obj->base))
74 if (is_vunmapable(msm_obj)) {
75 msm_gem_vunmap(&msm_obj->base);
78 msm_gem_unlock(&msm_obj->base);
80 if (++unmapped >= vmap_shrink_limit)
88 msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
90 struct msm_drm_private *priv =
91 container_of(nb, struct msm_drm_private, vmap_notifier);
92 struct list_head *mm_lists[] = {
93 &priv->inactive_dontneed,
94 &priv->inactive_willneed,
95 priv->gpu ? &priv->gpu->active_list : NULL,
98 unsigned idx, unmapped = 0;
100 mutex_lock(&priv->mm_lock);
102 for (idx = 0; mm_lists[idx]; idx++) {
103 unmapped += vmap_shrink(mm_lists[idx]);
105 if (unmapped >= vmap_shrink_limit)
109 mutex_unlock(&priv->mm_lock);
111 *(unsigned long *)ptr += unmapped;
114 trace_msm_gem_purge_vmaps(unmapped);
120 * msm_gem_shrinker_init - Initialize msm shrinker
123 * This function registers and sets up the msm shrinker.
125 void msm_gem_shrinker_init(struct drm_device *dev)
127 struct msm_drm_private *priv = dev->dev_private;
128 priv->shrinker.count_objects = msm_gem_shrinker_count;
129 priv->shrinker.scan_objects = msm_gem_shrinker_scan;
130 priv->shrinker.seeks = DEFAULT_SEEKS;
131 WARN_ON(register_shrinker(&priv->shrinker));
133 priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
134 WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
138 * msm_gem_shrinker_cleanup - Clean up msm shrinker
141 * This function unregisters the msm shrinker.
143 void msm_gem_shrinker_cleanup(struct drm_device *dev)
145 struct msm_drm_private *priv = dev->dev_private;
147 if (priv->shrinker.nr_deferred) {
148 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
149 unregister_shrinker(&priv->shrinker);