/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
        struct i915_global base;
        struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
        kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, ctx, engine);
        return ce;
}
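
/*
 * Example usage (an illustrative sketch, not part of the driver): the
 * context is created with a single reference (kref_init in
 * intel_context_init), so the caller owns the final intel_context_put().
 *
 *      ce = intel_context_create(ctx, engine);
 *      if (IS_ERR(ce))
 *              return PTR_ERR(ce);
 *      ...
 *      intel_context_put(ce);
 */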

/*
 * Slow path of intel_context_pin(): taken when the lockless fast path
 * (incrementing an already non-zero ce->pin_count) fails, i.e. for the
 * first pin of the context.
 */
int __intel_context_do_pin(struct intel_context *ce)
{
        int err;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (likely(!atomic_read(&ce->pin_count))) {
                intel_wakeref_t wakeref;

                err = 0;
                with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
                        err = ce->ops->pin(ce);
                if (err)
                        goto err;

                i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

                smp_mb__before_atomic(); /* flush pin before it is visible */
        }

        atomic_inc(&ce->pin_count);
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        mutex_unlock(&ce->pin_mutex);
        return 0;

err:
        mutex_unlock(&ce->pin_mutex);
        return err;
}

void intel_context_unpin(struct intel_context *ce)
{
        /* Drop all but the last pin without touching the mutex */
        if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
                return;

        /* We may be called from inside intel_context_pin() to evict another */
        intel_context_get(ce);
        mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

        if (likely(atomic_dec_and_test(&ce->pin_count))) {
                ce->ops->unpin(ce);

                i915_gem_context_put(ce->gem_context);
                intel_context_active_release(ce);
        }

        mutex_unlock(&ce->pin_mutex);
        intel_context_put(ce);
}
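
/*
 * Pin and unpin are expected to be paired by the caller; a hedged sketch
 * of typical usage:
 *
 *      err = intel_context_pin(ce);
 *      if (err)
 *              return err;
 *      ... backend state is mapped and ready for submission ...
 *      intel_context_unpin(ce);
 */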

static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
{
        int err;

        err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
        if (err)
                return err;

        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
         */
        vma->obj->pin_global++;
        vma->obj->mm.dirty = true;

        return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
        vma->obj->pin_global--;
        __i915_vma_unpin(vma);
}

static void intel_context_retire(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        if (ce->state)
                __context_unpin_state(ce->state);

        intel_context_put(ce);
}

void
intel_context_init(struct intel_context *ce,
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->cops);

        kref_init(&ce->ref);

        ce->gem_context = ctx;
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;

        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        i915_active_init(ctx->i915, &ce->active, intel_context_retire);
}

int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
{
        int err;

        if (!i915_active_acquire(&ce->active))
                return 0;

        intel_context_get(ce);

        if (!ce->state)
                return 0;

        err = __context_pin_state(ce->state, flags);
        if (err) {
                i915_active_cancel(&ce->active);
                intel_context_put(ce);
                return err;
        }

        /* Preallocate tracking nodes */
        if (!i915_gem_context_is_kernel(ce->gem_context)) {
                err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                              ce->engine);
                if (err) {
                        i915_active_release(&ce->active);
                        return err;
                }
        }

        return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
        /* Nodes preallocated in intel_context_active_acquire() */
        i915_active_acquire_barrier(&ce->active);
        i915_active_release(&ce->active);
}
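
/*
 * Note on pairing (an assumption about the callers, not spelled out in
 * this file): intel_context_active_acquire() is expected to be called by
 * the backend ->pin() hook when the context is first pinned, and the
 * matching intel_context_active_release() happens on the final unpin, as
 * seen in intel_context_unpin() above. The active tracker thus keeps
 * ce->state pinned for exactly the lifetime of the pinned context.
 */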

static void i915_global_context_shrink(void)
{
        kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
        kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
        .shrink = i915_global_context_shrink,
        .exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
        global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!global.slab_ce)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
        intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
        intel_engine_pm_put(ce->engine);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(ce);
        if (unlikely(err))
                return ERR_PTR(err);

        rq = i915_request_create(ce);
        intel_context_unpin(ce);

        return rq;
}
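
/*
 * Example usage (a sketch under the assumption that the request takes its
 * own pin on the context while it is active, which is why the temporary
 * pin above can be dropped before returning):
 *
 *      rq = intel_context_create_request(ce);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *      i915_request_add(rq);
 */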