drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
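/*
 * amdgpu_sync tracks the fences a command submission has to wait for.
 * Typical lifecycle: amdgpu_sync_create() initializes the object,
 * amdgpu_sync_fence()/amdgpu_sync_resv() collect dependencies,
 * amdgpu_sync_peek_fence()/amdgpu_sync_get_fence() or amdgpu_sync_wait()
 * consume them, and amdgpu_sync_free() releases everything.
 */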

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

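/* One entry per fence context; the hash is keyed by the fence context so
 * that syncing to several fences from the same context only keeps the
 * latest one (see amdgpu_sync_add_later()).
 */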
struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct dma_fence        *fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

        if (s_fence)
                return s_fence->owner;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
        if (*keep && dma_fence_is_later(*keep, fence))
                return;

        dma_fence_put(*keep);
        *keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);
                return true;
        }
        return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success or -ENOMEM when a
 * new hash entry cannot be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct dma_fence *f)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        if (amdgpu_sync_same_dev(adev, f) &&
            amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                amdgpu_sync_keep_later(&sync->last_vm_update, f);

        if (amdgpu_sync_add_later(sync, f))
                return 0;

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner of the submission, used to decide which shared fences to
 *         sync to
 *
 * Sync to the exclusive fence and, depending on the owner, to the shared
 * fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner)
{
        struct reservation_object_list *flist;
        struct dma_fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f);

        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates are only interesting
                         * for other VM updates and moves.
                         */
                        fence_owner = amdgpu_sync_get_owner(f);
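                        /* Skip the fence when both owners are defined and
                         * exactly one of them is a VM update.
                         */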
                        if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
                             (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                continue;

                        /* Ignore fences from the same owner as
                         * long as it isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            fence_owner == owner)
                                continue;
                }

                r = amdgpu_sync_fence(adev, sync, f);
                if (r)
                        break;
        }
        return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                         struct amdgpu_ring *ring)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;
                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched) {
                                if (dma_fence_is_signaled(&s_fence->scheduled))
                                        continue;

                                return &s_fence->scheduled;
                        }
                }

                return f;
        }

        return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object that is not yet
 * signaled.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {

                f = e->fence;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!dma_fence_is_signaled(f))
                        return f;

                dma_fence_put(f);
        }
        return NULL;
}

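/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, the wait is interruptible
 *
 * Wait for every fence in the sync object to signal, removing and dropping
 * the fences that have completed. Returns 0 on success or a negative error
 * code when a wait fails or is interrupted.
 */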
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = dma_fence_wait(e->fence, intr);
                if (r)
                        return r;

                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}