/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

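/*
 * Undo the reservations taken so far: unlock, in reverse list order, every
 * buffer that precedes @entry on @list. @entry itself, the buffer whose
 * reservation failed and triggered the back-off, is left untouched.
 */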
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		reservation_object_unlock(bo->resv);
	}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		ttm_bo_del_from_lru(bo);
	}
}

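/**
 * ttm_eu_backoff_reservation - undo a previous reservation of buffers
 *
 * @ticket: ww_acquire_ctx from the reserve call, or NULL if none was used
 * @list:   thread private list of ttm_validate_buffer structs
 *
 * Puts every buffer on @list back on the LRU, unlocks its reservation
 * object and, if a ticket was used, finishes the acquire context.
 */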
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 *
 * A sketch of how this pairs with ttm_eu_fence_buffer_objects() and
 * ttm_eu_backoff_reservation() in a typical submission path is given in a
 * comment at the end of this file.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			reservation_object_unlock(bo->resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
								       ticket);
			} else {
				ww_mutex_lock_slow(&bo->resv->lock, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

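/**
 * ttm_eu_fence_buffer_objects - attach a fence and unreserve buffers
 *
 * @ticket: ww_acquire_ctx from the reserve call, or NULL if none was used
 * @list:   thread private list of ttm_validate_buffer structs
 * @fence:  fence to attach to every buffer on @list
 *
 * Adds @fence to each buffer's reservation object (as a shared or an
 * exclusive fence, depending on the entry), puts the buffers back on the
 * LRU, unlocks them and, if a ticket was used, finishes the acquire
 * context. Call this once command submission for the reserved buffers
 * is complete.
 */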
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
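
/*
 * Usage sketch (illustrative only, not compiled): a driver's command
 * submission path typically pairs the helpers in this file as follows:
 * reserve every buffer on the validation list, emit the commands, then
 * either attach the fence and unreserve on success, or back off on failure.
 * The my_submit() and my_emit_commands() names below are hypothetical; only
 * the ttm_eu_* calls reflect the interface implemented in this file.
 *
 *	static int my_submit(struct ww_acquire_ctx *ticket,
 *			     struct list_head *list, struct dma_fence *fence)
 *	{
 *		int ret;
 *
 *		ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
 *		if (ret)
 *			return ret;
 *
 *		ret = my_emit_commands(list);
 *		if (ret) {
 *			ttm_eu_backoff_reservation(ticket, list);
 *			return ret;
 *		}
 *
 *		ttm_eu_fence_buffer_objects(ticket, list, fence);
 *		return 0;
 *	}
 */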