drivers/gpu/drm/ttm/ttm_bo.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_module.h>
35 #include <drm/ttm/ttm_bo_driver.h>
36 #include <drm/ttm/ttm_placement.h>
37 #include <linux/jiffies.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
40 #include <linux/mm.h>
41 #include <linux/file.h>
42 #include <linux/module.h>
43 #include <linux/atomic.h>
44 #include <linux/dma-resv.h>
45
46 static void ttm_bo_global_kobj_release(struct kobject *kobj);
47
48 /**
49  * ttm_global_mutex - protecting the global BO state
50  */
51 DEFINE_MUTEX(ttm_global_mutex);
52 unsigned ttm_bo_glob_use_count;
53 struct ttm_bo_global ttm_bo_glob;
54 EXPORT_SYMBOL(ttm_bo_glob);
55
56 static struct attribute ttm_bo_count = {
57         .name = "bo_count",
58         .mode = S_IRUGO
59 };
60
61 /* default destructor */
62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
63 {
64         kfree(bo);
65 }
66
67 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
68                                         struct ttm_placement *placement)
69 {
70         struct drm_printer p = drm_debug_printer(TTM_PFX);
71         struct ttm_resource_manager *man;
72         int i, mem_type;
73
74         drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
75                    bo, bo->mem.num_pages, bo->mem.size >> 10,
76                    bo->mem.size >> 20);
77         for (i = 0; i < placement->num_placement; i++) {
78                 mem_type = placement->placement[i].mem_type;
79                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
80                            i, placement->placement[i].flags, mem_type);
81                 man = ttm_manager_type(bo->bdev, mem_type);
82                 ttm_resource_manager_debug(man, &p);
83         }
84 }
85
86 static ssize_t ttm_bo_global_show(struct kobject *kobj,
87                                   struct attribute *attr,
88                                   char *buffer)
89 {
90         struct ttm_bo_global *glob =
91                 container_of(kobj, struct ttm_bo_global, kobj);
92
93         return snprintf(buffer, PAGE_SIZE, "%d\n",
94                                 atomic_read(&glob->bo_count));
95 }
96
97 static struct attribute *ttm_bo_global_attrs[] = {
98         &ttm_bo_count,
99         NULL
100 };
101
102 static const struct sysfs_ops ttm_bo_global_ops = {
103         .show = &ttm_bo_global_show
104 };
105
106 static struct kobj_type ttm_bo_glob_kobj_type  = {
107         .release = &ttm_bo_global_kobj_release,
108         .sysfs_ops = &ttm_bo_global_ops,
109         .default_attrs = ttm_bo_global_attrs
110 };
111
112 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
113                                   struct ttm_resource *mem)
114 {
115         struct ttm_bo_device *bdev = bo->bdev;
116         struct ttm_resource_manager *man;
117
118         if (!list_empty(&bo->lru) || bo->pin_count)
119                 return;
120
121         man = ttm_manager_type(bdev, mem->mem_type);
122         list_add_tail(&bo->lru, &man->lru[bo->priority]);
123
124         if (man->use_tt && bo->ttm &&
125             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
126                                      TTM_PAGE_FLAG_SWAPPED))) {
127                 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
128         }
129 }
130
131 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
132 {
133         struct ttm_bo_device *bdev = bo->bdev;
134         bool notify = false;
135
136         if (!list_empty(&bo->swap)) {
137                 list_del_init(&bo->swap);
138                 notify = true;
139         }
140         if (!list_empty(&bo->lru)) {
141                 list_del_init(&bo->lru);
142                 notify = true;
143         }
144
145         if (notify && bdev->driver->del_from_lru_notify)
146                 bdev->driver->del_from_lru_notify(bo);
147 }
148
149 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
150                                      struct ttm_buffer_object *bo)
151 {
152         if (!pos->first)
153                 pos->first = bo;
154         pos->last = bo;
155 }
156
157 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
158                              struct ttm_lru_bulk_move *bulk)
159 {
160         dma_resv_assert_held(bo->base.resv);
161
162         ttm_bo_del_from_lru(bo);
163         ttm_bo_add_mem_to_lru(bo, &bo->mem);
164
165         if (bulk && !bo->pin_count) {
166                 switch (bo->mem.mem_type) {
167                 case TTM_PL_TT:
168                         ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
169                         break;
170
171                 case TTM_PL_VRAM:
172                         ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
173                         break;
174                 }
175                 if (bo->ttm && !(bo->ttm->page_flags &
176                                  (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
177                         ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
178         }
179 }
180 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
181
182 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
183 {
184         unsigned i;
185
186         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
187                 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
188                 struct ttm_resource_manager *man;
189
190                 if (!pos->first)
191                         continue;
192
193                 dma_resv_assert_held(pos->first->base.resv);
194                 dma_resv_assert_held(pos->last->base.resv);
195
196                 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
197                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
198                                     &pos->last->lru);
199         }
200
201         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
202                 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
203                 struct ttm_resource_manager *man;
204
205                 if (!pos->first)
206                         continue;
207
208                 dma_resv_assert_held(pos->first->base.resv);
209                 dma_resv_assert_held(pos->last->base.resv);
210
211                 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
212                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
213                                     &pos->last->lru);
214         }
215
216         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
217                 struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
218                 struct list_head *lru;
219
220                 if (!pos->first)
221                         continue;
222
223                 dma_resv_assert_held(pos->first->base.resv);
224                 dma_resv_assert_held(pos->last->base.resv);
225
226                 lru = &ttm_bo_glob.swap_lru[i];
227                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
228         }
229 }
230 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
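/*
 * Illustrative sketch (not part of this file): a driver that keeps many BOs
 * on a single, already locked reservation object can batch its LRU
 * maintenance with a ttm_lru_bulk_move instead of touching the lists once
 * per BO.  "vm", its "bo_list" and the "entry" wrapper with members
 * "vm_node" and "tbo" are hypothetical driver state.
 *
 *	struct ttm_lru_bulk_move bulk;
 *	struct my_bo_entry *entry;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&ttm_bo_glob.lru_lock);
 *	list_for_each_entry(entry, &vm->bo_list, vm_node)
 *		ttm_bo_move_to_lru_tail(&entry->tbo, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&ttm_bo_glob.lru_lock);
 */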
231
232 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
233                                   struct ttm_resource *mem, bool evict,
234                                   struct ttm_operation_ctx *ctx)
235 {
236         struct ttm_bo_device *bdev = bo->bdev;
237         struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
238         struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
239         int ret;
240
241         ttm_bo_unmap_virtual(bo);
242
243         /*
244          * Create and bind a ttm if required.
245          */
246
247         if (new_man->use_tt) {
248                 /* Zero init the new TTM structure if the old location should
249                  * have used one as well.
250                  */
251                 ret = ttm_tt_create(bo, old_man->use_tt);
252                 if (ret)
253                         goto out_err;
254
255                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
256                 if (ret)
257                         goto out_err;
258
259                 if (mem->mem_type != TTM_PL_SYSTEM) {
260                         ret = ttm_tt_populate(bdev, bo->ttm, ctx);
261                         if (ret)
262                                 goto out_err;
263
264                         ret = ttm_bo_tt_bind(bo, mem);
265                         if (ret)
266                                 goto out_err;
267                 }
268
269                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
270                         if (bdev->driver->move_notify)
271                                 bdev->driver->move_notify(bo, evict, mem);
272                         bo->mem = *mem;
273                         goto moved;
274                 }
275         }
276
277         if (bdev->driver->move_notify)
278                 bdev->driver->move_notify(bo, evict, mem);
279
280         if (old_man->use_tt && new_man->use_tt)
281                 ret = ttm_bo_move_ttm(bo, ctx, mem);
282         else if (bdev->driver->move)
283                 ret = bdev->driver->move(bo, evict, ctx, mem);
284         else
285                 ret = ttm_bo_move_memcpy(bo, ctx, mem);
286
287         if (ret) {
288                 if (bdev->driver->move_notify) {
289                         swap(*mem, bo->mem);
290                         bdev->driver->move_notify(bo, false, mem);
291                         swap(*mem, bo->mem);
292                 }
293
294                 goto out_err;
295         }
296
297 moved:
298         ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
299         return 0;
300
301 out_err:
302         new_man = ttm_manager_type(bdev, bo->mem.mem_type);
303         if (!new_man->use_tt)
304                 ttm_bo_tt_destroy(bo);
305
306         return ret;
307 }
308
309 /**
310  * Must be called with bo::resv reserved.
311  * Releases the GPU memory type usage on destruction.
312  * This is the place to put in driver-specific hooks to release
313  * driver-private resources.
314  * The caller remains responsible for releasing the reservation lock.
315  */
316
317 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
318 {
319         if (bo->bdev->driver->move_notify)
320                 bo->bdev->driver->move_notify(bo, false, NULL);
321
322         ttm_bo_tt_destroy(bo);
323         ttm_resource_free(bo, &bo->mem);
324 }
325
326 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
327 {
328         int r;
329
330         if (bo->base.resv == &bo->base._resv)
331                 return 0;
332
333         BUG_ON(!dma_resv_trylock(&bo->base._resv));
334
335         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
336         dma_resv_unlock(&bo->base._resv);
337         if (r)
338                 return r;
339
340         if (bo->type != ttm_bo_type_sg) {
341                 /* This works because the BO is about to be destroyed and nobody
342                  * references it anymore. The only tricky case is the trylock on
343                  * the resv object while holding the lru_lock.
344                  */
345                 spin_lock(&ttm_bo_glob.lru_lock);
346                 bo->base.resv = &bo->base._resv;
347                 spin_unlock(&ttm_bo_glob.lru_lock);
348         }
349
350         return r;
351 }
352
353 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
354 {
355         struct dma_resv *resv = &bo->base._resv;
356         struct dma_resv_list *fobj;
357         struct dma_fence *fence;
358         int i;
359
360         rcu_read_lock();
361         fobj = rcu_dereference(resv->fence);
362         fence = rcu_dereference(resv->fence_excl);
363         if (fence && !fence->ops->signaled)
364                 dma_fence_enable_sw_signaling(fence);
365
366         for (i = 0; fobj && i < fobj->shared_count; ++i) {
367                 fence = rcu_dereference(fobj->shared[i]);
368
369                 if (!fence->ops->signaled)
370                         dma_fence_enable_sw_signaling(fence);
371         }
372         rcu_read_unlock();
373 }
374
375 /**
376  * ttm_bo_cleanup_refs
377  * If the bo is idle, remove it from the LRU lists and unref it.
378  * If it is not idle, block if possible.
379  *
380  * Must be called with the lru_lock and the reservation held; this function
381  * will drop the lru_lock and optionally the reservation lock before returning.
382  *
383  * @interruptible         Any sleeps should occur interruptibly.
384  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
385  * @unlock_resv           Unlock the reservation lock as well.
386  */
387
388 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
389                                bool interruptible, bool no_wait_gpu,
390                                bool unlock_resv)
391 {
392         struct dma_resv *resv = &bo->base._resv;
393         int ret;
394
395         if (dma_resv_test_signaled_rcu(resv, true))
396                 ret = 0;
397         else
398                 ret = -EBUSY;
399
400         if (ret && !no_wait_gpu) {
401                 long lret;
402
403                 if (unlock_resv)
404                         dma_resv_unlock(bo->base.resv);
405                 spin_unlock(&ttm_bo_glob.lru_lock);
406
407                 lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
408                                                  30 * HZ);
409
410                 if (lret < 0)
411                         return lret;
412                 else if (lret == 0)
413                         return -EBUSY;
414
415                 spin_lock(&ttm_bo_glob.lru_lock);
416                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
417                         /*
418                          * We raced and lost: someone else holds the reservation now
419                          * and is probably busy in ttm_bo_cleanup_memtype_use.
420                          *
421                          * Even if that is not the case, any delayed destruction would
422                          * now succeed since we finished waiting, so just return
423                          * success here.
424                          */
425                         spin_unlock(&ttm_bo_glob.lru_lock);
426                         return 0;
427                 }
428                 ret = 0;
429         }
430
431         if (ret || unlikely(list_empty(&bo->ddestroy))) {
432                 if (unlock_resv)
433                         dma_resv_unlock(bo->base.resv);
434                 spin_unlock(&ttm_bo_glob.lru_lock);
435                 return ret;
436         }
437
438         ttm_bo_del_from_lru(bo);
439         list_del_init(&bo->ddestroy);
440         spin_unlock(&ttm_bo_glob.lru_lock);
441         ttm_bo_cleanup_memtype_use(bo);
442
443         if (unlock_resv)
444                 dma_resv_unlock(bo->base.resv);
445
446         ttm_bo_put(bo);
447
448         return 0;
449 }
450
451 /**
452  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
453  * encountered buffers.
454  */
455 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
456 {
457         struct ttm_bo_global *glob = &ttm_bo_glob;
458         struct list_head removed;
459         bool empty;
460
461         INIT_LIST_HEAD(&removed);
462
463         spin_lock(&glob->lru_lock);
464         while (!list_empty(&bdev->ddestroy)) {
465                 struct ttm_buffer_object *bo;
466
467                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
468                                       ddestroy);
469                 list_move_tail(&bo->ddestroy, &removed);
470                 if (!ttm_bo_get_unless_zero(bo))
471                         continue;
472
473                 if (remove_all || bo->base.resv != &bo->base._resv) {
474                         spin_unlock(&glob->lru_lock);
475                         dma_resv_lock(bo->base.resv, NULL);
476
477                         spin_lock(&glob->lru_lock);
478                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
479
480                 } else if (dma_resv_trylock(bo->base.resv)) {
481                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
482                 } else {
483                         spin_unlock(&glob->lru_lock);
484                 }
485
486                 ttm_bo_put(bo);
487                 spin_lock(&glob->lru_lock);
488         }
489         list_splice_tail(&removed, &bdev->ddestroy);
490         empty = list_empty(&bdev->ddestroy);
491         spin_unlock(&glob->lru_lock);
492
493         return empty;
494 }
495
496 static void ttm_bo_delayed_workqueue(struct work_struct *work)
497 {
498         struct ttm_bo_device *bdev =
499             container_of(work, struct ttm_bo_device, wq.work);
500
501         if (!ttm_bo_delayed_delete(bdev, false))
502                 schedule_delayed_work(&bdev->wq,
503                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
504 }
505
506 static void ttm_bo_release(struct kref *kref)
507 {
508         struct ttm_buffer_object *bo =
509             container_of(kref, struct ttm_buffer_object, kref);
510         struct ttm_bo_device *bdev = bo->bdev;
511         size_t acc_size = bo->acc_size;
512         int ret;
513
514         if (!bo->deleted) {
515                 ret = ttm_bo_individualize_resv(bo);
516                 if (ret) {
517                         /* Last resort, if we fail to allocate memory for the
518                         /* Last resort: if we fail to allocate memory for the
519                          * fences, block until the BO becomes idle
520                         dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
521                                                   30 * HZ);
522                 }
523
524                 if (bo->bdev->driver->release_notify)
525                         bo->bdev->driver->release_notify(bo);
526
527                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
528                 ttm_mem_io_free(bdev, &bo->mem);
529         }
530
531         if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
532             !dma_resv_trylock(bo->base.resv)) {
533                 /* The BO is not idle, resurrect it for delayed destroy */
534                 ttm_bo_flush_all_fences(bo);
535                 bo->deleted = true;
536
537                 spin_lock(&ttm_bo_glob.lru_lock);
538
539                 /*
540                  * Make pinned bos immediately available to
541                  * shrinkers, now that they are queued for
542                  * destruction.
543                  */
544                 if (bo->pin_count) {
545                         bo->pin_count = 0;
546                         ttm_bo_del_from_lru(bo);
547                         ttm_bo_add_mem_to_lru(bo, &bo->mem);
548                 }
549
550                 kref_init(&bo->kref);
551                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
552                 spin_unlock(&ttm_bo_glob.lru_lock);
553
554                 schedule_delayed_work(&bdev->wq,
555                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
556                 return;
557         }
558
559         spin_lock(&ttm_bo_glob.lru_lock);
560         ttm_bo_del_from_lru(bo);
561         list_del(&bo->ddestroy);
562         spin_unlock(&ttm_bo_glob.lru_lock);
563
564         ttm_bo_cleanup_memtype_use(bo);
565         dma_resv_unlock(bo->base.resv);
566
567         atomic_dec(&ttm_bo_glob.bo_count);
568         dma_fence_put(bo->moving);
569         if (!ttm_bo_uses_embedded_gem_object(bo))
570                 dma_resv_fini(&bo->base._resv);
571         bo->destroy(bo);
572         ttm_mem_global_free(&ttm_mem_glob, acc_size);
573 }
574
575 void ttm_bo_put(struct ttm_buffer_object *bo)
576 {
577         kref_put(&bo->kref, ttm_bo_release);
578 }
579 EXPORT_SYMBOL(ttm_bo_put);
580
581 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
582 {
583         return cancel_delayed_work_sync(&bdev->wq);
584 }
585 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
586
587 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
588 {
589         if (resched)
590                 schedule_delayed_work(&bdev->wq,
591                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
592 }
593 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
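/*
 * Illustrative sketch (not part of this file): the lock/unlock pair above is
 * typically used to keep the delayed-destroy worker quiet across an operation
 * such as suspend.  "my_suspend_hw" is a hypothetical driver function.
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *
 *	my_suspend_hw(bdev);
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */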
594
595 static int ttm_bo_evict(struct ttm_buffer_object *bo,
596                         struct ttm_operation_ctx *ctx)
597 {
598         struct ttm_bo_device *bdev = bo->bdev;
599         struct ttm_resource evict_mem;
600         struct ttm_placement placement;
601         int ret = 0;
602
603         dma_resv_assert_held(bo->base.resv);
604
605         placement.num_placement = 0;
606         placement.num_busy_placement = 0;
607         bdev->driver->evict_flags(bo, &placement);
608
609         if (!placement.num_placement && !placement.num_busy_placement) {
610                 ttm_bo_wait(bo, false, false);
611
612                 ttm_bo_cleanup_memtype_use(bo);
613                 return ttm_tt_create(bo, false);
614         }
615
616         evict_mem = bo->mem;
617         evict_mem.mm_node = NULL;
618         evict_mem.bus.offset = 0;
619         evict_mem.bus.addr = NULL;
620
621         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
622         if (ret) {
623                 if (ret != -ERESTARTSYS) {
624                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
625                                bo);
626                         ttm_bo_mem_space_debug(bo, &placement);
627                 }
628                 goto out;
629         }
630
631         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
632         if (unlikely(ret)) {
633                 if (ret != -ERESTARTSYS)
634                         pr_err("Buffer eviction failed\n");
635                 ttm_resource_free(bo, &evict_mem);
636         }
637 out:
638         return ret;
639 }
640
641 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
642                               const struct ttm_place *place)
643 {
644         /* Don't evict this BO if it's outside of the
645          * requested placement range
646          */
647         if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
648             (place->lpfn && place->lpfn <= bo->mem.start))
649                 return false;
650
651         return true;
652 }
653 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
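/*
 * Illustrative sketch (not part of this file): drivers usually point their
 * eviction_valuable callback at the helper above and only add their own
 * checks on top, e.g. refusing to evict firmware or ring buffers.
 * "my_bo_is_protected" is a hypothetical helper.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_protected(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */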
654
655 /**
656  * Check whether the target bo may be evicted or swapped out, covering:
657  *
658  * a. if it shares the same reservation object as ctx->resv, the reservation
659  * is assumed to already be locked, so don't lock it again; return true
660  * directly when the operation allows reserved eviction or the target bo
661  * is already on the delayed free list;
662  *
663  * b. Otherwise, trylock it.
664  */
665 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
666                         struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
667 {
668         bool ret = false;
669
670         if (bo->base.resv == ctx->resv) {
671                 dma_resv_assert_held(bo->base.resv);
672                 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
673                         ret = true;
674                 *locked = false;
675                 if (busy)
676                         *busy = false;
677         } else {
678                 ret = dma_resv_trylock(bo->base.resv);
679                 *locked = ret;
680                 if (busy)
681                         *busy = !ret;
682         }
683
684         return ret;
685 }
686
687 /**
688  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
689  *
690  * @busy_bo: BO which couldn't be locked with trylock
691  * @ctx: operation context
692  * @ticket: acquire ticket
693  *
694  * Try to lock a busy buffer object to avoid failing eviction.
695  */
696 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
697                                    struct ttm_operation_ctx *ctx,
698                                    struct ww_acquire_ctx *ticket)
699 {
700         int r;
701
702         if (!busy_bo || !ticket)
703                 return -EBUSY;
704
705         if (ctx->interruptible)
706                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
707                                                           ticket);
708         else
709                 r = dma_resv_lock(busy_bo->base.resv, ticket);
710
711         /*
712          * TODO: It would be better to keep the BO locked until allocation is at
713          * least tried one more time, but that would mean a much larger rework
714          * of TTM.
715          */
716         if (!r)
717                 dma_resv_unlock(busy_bo->base.resv);
718
719         return r == -EDEADLK ? -EBUSY : r;
720 }
721
722 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
723                         struct ttm_resource_manager *man,
724                         const struct ttm_place *place,
725                         struct ttm_operation_ctx *ctx,
726                         struct ww_acquire_ctx *ticket)
727 {
728         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
729         bool locked = false;
730         unsigned i;
731         int ret;
732
733         spin_lock(&ttm_bo_glob.lru_lock);
734         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
735                 list_for_each_entry(bo, &man->lru[i], lru) {
736                         bool busy;
737
738                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
739                                                             &busy)) {
740                                 if (busy && !busy_bo && ticket !=
741                                     dma_resv_locking_ctx(bo->base.resv))
742                                         busy_bo = bo;
743                                 continue;
744                         }
745
746                         if (place && !bdev->driver->eviction_valuable(bo,
747                                                                       place)) {
748                                 if (locked)
749                                         dma_resv_unlock(bo->base.resv);
750                                 continue;
751                         }
752                         if (!ttm_bo_get_unless_zero(bo)) {
753                                 if (locked)
754                                         dma_resv_unlock(bo->base.resv);
755                                 continue;
756                         }
757                         break;
758                 }
759
760                 /* If the inner loop terminated early, we have our candidate */
761                 if (&bo->lru != &man->lru[i])
762                         break;
763
764                 bo = NULL;
765         }
766
767         if (!bo) {
768                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
769                         busy_bo = NULL;
770                 spin_unlock(&ttm_bo_glob.lru_lock);
771                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
772                 if (busy_bo)
773                         ttm_bo_put(busy_bo);
774                 return ret;
775         }
776
777         if (bo->deleted) {
778                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
779                                           ctx->no_wait_gpu, locked);
780                 ttm_bo_put(bo);
781                 return ret;
782         }
783
784         spin_unlock(&ttm_bo_glob.lru_lock);
785
786         ret = ttm_bo_evict(bo, ctx);
787         if (locked)
788                 ttm_bo_unreserve(bo);
789
790         ttm_bo_put(bo);
791         return ret;
792 }
793
794 /**
795  * Add the last move fence to the BO and reserve a new shared slot.
796  */
797 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
798                                  struct ttm_resource_manager *man,
799                                  struct ttm_resource *mem,
800                                  bool no_wait_gpu)
801 {
802         struct dma_fence *fence;
803         int ret;
804
805         spin_lock(&man->move_lock);
806         fence = dma_fence_get(man->move);
807         spin_unlock(&man->move_lock);
808
809         if (!fence)
810                 return 0;
811
812         if (no_wait_gpu) {
813                 dma_fence_put(fence);
814                 return -EBUSY;
815         }
816
817         dma_resv_add_shared_fence(bo->base.resv, fence);
818
819         ret = dma_resv_reserve_shared(bo->base.resv, 1);
820         if (unlikely(ret)) {
821                 dma_fence_put(fence);
822                 return ret;
823         }
824
825         dma_fence_put(bo->moving);
826         bo->moving = fence;
827         return 0;
828 }
829
830 /**
831  * Repeatedly evict memory from the LRU for @mem_type until we create enough
832  * space, or we've evicted everything and there isn't enough space.
833  */
834 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
835                                   const struct ttm_place *place,
836                                   struct ttm_resource *mem,
837                                   struct ttm_operation_ctx *ctx)
838 {
839         struct ttm_bo_device *bdev = bo->bdev;
840         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
841         struct ww_acquire_ctx *ticket;
842         int ret;
843
844         ticket = dma_resv_locking_ctx(bo->base.resv);
845         do {
846                 ret = ttm_resource_alloc(bo, place, mem);
847                 if (likely(!ret))
848                         break;
849                 if (unlikely(ret != -ENOSPC))
850                         return ret;
851                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
852                                           ticket);
853                 if (unlikely(ret != 0))
854                         return ret;
855         } while (1);
856
857         return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
858 }
859
860 static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
861                                       uint32_t cur_placement,
862                                       uint32_t proposed_placement)
863 {
864         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
865         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
866
867         /**
868          * Keep current caching if possible.
869          */
870
871         if ((cur_placement & caching) != 0)
872                 result |= (cur_placement & caching);
873         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
874                 result |= TTM_PL_FLAG_CACHED;
875         else if ((TTM_PL_FLAG_WC & caching) != 0)
876                 result |= TTM_PL_FLAG_WC;
877         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
878                 result |= TTM_PL_FLAG_UNCACHED;
879
880         return result;
881 }
882
883 /**
884  * ttm_bo_mem_placement - check if placement is compatible
885  * @bo: BO to find memory for
886  * @place: where to search
887  * @mem: the memory object to fill in
888  * @ctx: operation context
889  *
890  * Check if placement is compatible and fill in mem structure.
891  * Returns 0 when the placement is compatible and @mem has been filled in,
892  * or -EBUSY when the placement cannot be used.
893  */
894 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
895                                 const struct ttm_place *place,
896                                 struct ttm_resource *mem,
897                                 struct ttm_operation_ctx *ctx)
898 {
899         struct ttm_bo_device *bdev = bo->bdev;
900         struct ttm_resource_manager *man;
901         uint32_t cur_flags = 0;
902
903         man = ttm_manager_type(bdev, place->mem_type);
904         if (!man || !ttm_resource_manager_used(man))
905                 return -EBUSY;
906
907         cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
908                                           place->flags);
909         cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
910
911         mem->mem_type = place->mem_type;
912         mem->placement = cur_flags;
913
914         spin_lock(&ttm_bo_glob.lru_lock);
915         ttm_bo_del_from_lru(bo);
916         ttm_bo_add_mem_to_lru(bo, mem);
917         spin_unlock(&ttm_bo_glob.lru_lock);
918
919         return 0;
920 }
921
922 /**
923  * Creates space for memory region @mem according to its type.
924  *
925  * This function first searches for free space in compatible memory types in
926  * the priority order defined by the driver.  If free space isn't found, then
927  * ttm_bo_mem_force_space is attempted in priority order to evict and find
928  * space.
929  */
930 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
931                         struct ttm_placement *placement,
932                         struct ttm_resource *mem,
933                         struct ttm_operation_ctx *ctx)
934 {
935         struct ttm_bo_device *bdev = bo->bdev;
936         bool type_found = false;
937         int i, ret;
938
939         ret = dma_resv_reserve_shared(bo->base.resv, 1);
940         if (unlikely(ret))
941                 return ret;
942
943         for (i = 0; i < placement->num_placement; ++i) {
944                 const struct ttm_place *place = &placement->placement[i];
945                 struct ttm_resource_manager *man;
946
947                 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
948                 if (ret)
949                         continue;
950
951                 type_found = true;
952                 ret = ttm_resource_alloc(bo, place, mem);
953                 if (ret == -ENOSPC)
954                         continue;
955                 if (unlikely(ret))
956                         goto error;
957
958                 man = ttm_manager_type(bdev, mem->mem_type);
959                 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
960                 if (unlikely(ret)) {
961                         ttm_resource_free(bo, mem);
962                         if (ret == -EBUSY)
963                                 continue;
964
965                         goto error;
966                 }
967                 return 0;
968         }
969
970         for (i = 0; i < placement->num_busy_placement; ++i) {
971                 const struct ttm_place *place = &placement->busy_placement[i];
972
973                 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
974                 if (ret)
975                         continue;
976
977                 type_found = true;
978                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
979                 if (likely(!ret))
980                         return 0;
981
982                 if (ret && ret != -EBUSY)
983                         goto error;
984         }
985
986         ret = -ENOMEM;
987         if (!type_found) {
988                 pr_err("No compatible memory type found\n");
989                 ret = -EINVAL;
990         }
991
992 error:
993         if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
994                 ttm_bo_move_to_lru_tail_unlocked(bo);
995         }
996
997         return ret;
998 }
999 EXPORT_SYMBOL(ttm_bo_mem_space);
1000
1001 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1002                               struct ttm_placement *placement,
1003                               struct ttm_operation_ctx *ctx)
1004 {
1005         int ret = 0;
1006         struct ttm_resource mem;
1007
1008         dma_resv_assert_held(bo->base.resv);
1009
1010         mem.num_pages = bo->num_pages;
1011         mem.size = mem.num_pages << PAGE_SHIFT;
1012         mem.page_alignment = bo->mem.page_alignment;
1013         mem.bus.offset = 0;
1014         mem.bus.addr = NULL;
1015         mem.mm_node = NULL;
1016
1017         /*
1018          * Determine where to move the buffer.
1019          */
1020         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1021         if (ret)
1022                 goto out_unlock;
1023         ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1024 out_unlock:
1025         if (ret)
1026                 ttm_resource_free(bo, &mem);
1027         return ret;
1028 }
1029
1030 static bool ttm_bo_places_compat(const struct ttm_place *places,
1031                                  unsigned num_placement,
1032                                  struct ttm_resource *mem,
1033                                  uint32_t *new_flags)
1034 {
1035         unsigned i;
1036
1037         for (i = 0; i < num_placement; i++) {
1038                 const struct ttm_place *heap = &places[i];
1039
1040                 if ((mem->start < heap->fpfn ||
1041                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1042                         continue;
1043
1044                 *new_flags = heap->flags;
1045                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1046                     (mem->mem_type == heap->mem_type) &&
1047                     (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1048                      (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1049                         return true;
1050         }
1051         return false;
1052 }
1053
1054 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1055                        struct ttm_resource *mem,
1056                        uint32_t *new_flags)
1057 {
1058         if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1059                                  mem, new_flags))
1060                 return true;
1061
1062         if ((placement->busy_placement != placement->placement ||
1063              placement->num_busy_placement > placement->num_placement) &&
1064             ttm_bo_places_compat(placement->busy_placement,
1065                                  placement->num_busy_placement,
1066                                  mem, new_flags))
1067                 return true;
1068
1069         return false;
1070 }
1071 EXPORT_SYMBOL(ttm_bo_mem_compat);
1072
1073 int ttm_bo_validate(struct ttm_buffer_object *bo,
1074                     struct ttm_placement *placement,
1075                     struct ttm_operation_ctx *ctx)
1076 {
1077         int ret;
1078         uint32_t new_flags;
1079
1080         dma_resv_assert_held(bo->base.resv);
1081
1082         /*
1083          * Remove the backing store if no placement is given.
1084          */
1085         if (!placement->num_placement && !placement->num_busy_placement) {
1086                 ret = ttm_bo_pipeline_gutting(bo);
1087                 if (ret)
1088                         return ret;
1089
1090                 return ttm_tt_create(bo, false);
1091         }
1092
1093         /*
1094          * Check whether we need to move buffer.
1095          */
1096         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1097                 ret = ttm_bo_move_buffer(bo, placement, ctx);
1098                 if (ret)
1099                         return ret;
1100         } else {
1101                 bo->mem.placement &= TTM_PL_MASK_CACHING;
1102                 bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
1103         }
1104         /*
1105          * We might need to add a TTM.
1106          */
1107         if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1108                 ret = ttm_tt_create(bo, true);
1109                 if (ret)
1110                         return ret;
1111         }
1112         return 0;
1113 }
1114 EXPORT_SYMBOL(ttm_bo_validate);
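/*
 * Illustrative sketch (not part of this file): validating a reserved BO into
 * a single VRAM placement.  The write-combined caching flag and the one-entry
 * placement table are assumptions; real drivers define their own.
 *
 *	struct ttm_place place = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.mem_type = TTM_PL_VRAM,
 *		.flags = TTM_PL_FLAG_WC,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *		.num_busy_placement = 1,
 *		.busy_placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */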
1115
1116 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1117                          struct ttm_buffer_object *bo,
1118                          unsigned long size,
1119                          enum ttm_bo_type type,
1120                          struct ttm_placement *placement,
1121                          uint32_t page_alignment,
1122                          struct ttm_operation_ctx *ctx,
1123                          size_t acc_size,
1124                          struct sg_table *sg,
1125                          struct dma_resv *resv,
1126                          void (*destroy) (struct ttm_buffer_object *))
1127 {
1128         struct ttm_mem_global *mem_glob = &ttm_mem_glob;
1129         int ret = 0;
1130         unsigned long num_pages;
1131         bool locked;
1132
1133         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1134         if (ret) {
1135                 pr_err("Out of kernel memory\n");
1136                 if (destroy)
1137                         (*destroy)(bo);
1138                 else
1139                         kfree(bo);
1140                 return -ENOMEM;
1141         }
1142
1143         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1144         if (num_pages == 0) {
1145                 pr_err("Illegal buffer object size\n");
1146                 if (destroy)
1147                         (*destroy)(bo);
1148                 else
1149                         kfree(bo);
1150                 ttm_mem_global_free(mem_glob, acc_size);
1151                 return -EINVAL;
1152         }
1153         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1154
1155         kref_init(&bo->kref);
1156         INIT_LIST_HEAD(&bo->lru);
1157         INIT_LIST_HEAD(&bo->ddestroy);
1158         INIT_LIST_HEAD(&bo->swap);
1159         bo->bdev = bdev;
1160         bo->type = type;
1161         bo->num_pages = num_pages;
1162         bo->mem.size = num_pages << PAGE_SHIFT;
1163         bo->mem.mem_type = TTM_PL_SYSTEM;
1164         bo->mem.num_pages = bo->num_pages;
1165         bo->mem.mm_node = NULL;
1166         bo->mem.page_alignment = page_alignment;
1167         bo->mem.bus.offset = 0;
1168         bo->mem.bus.addr = NULL;
1169         bo->moving = NULL;
1170         bo->mem.placement = TTM_PL_FLAG_CACHED;
1171         bo->acc_size = acc_size;
1172         bo->pin_count = 0;
1173         bo->sg = sg;
1174         if (resv) {
1175                 bo->base.resv = resv;
1176                 dma_resv_assert_held(bo->base.resv);
1177         } else {
1178                 bo->base.resv = &bo->base._resv;
1179         }
1180         if (!ttm_bo_uses_embedded_gem_object(bo)) {
1181                 /*
1182                  * bo.gem is not initialized, so we have to set up the
1183                  * struct elements we want to use regardless.
1184                  */
1185                 dma_resv_init(&bo->base._resv);
1186                 drm_vma_node_reset(&bo->base.vma_node);
1187         }
1188         atomic_inc(&ttm_bo_glob.bo_count);
1189
1190         /*
1191          * For ttm_bo_type_device buffers, allocate
1192          * address space from the device.
1193          */
1194         if (bo->type == ttm_bo_type_device ||
1195             bo->type == ttm_bo_type_sg)
1196                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1197                                          bo->mem.num_pages);
1198
1199         /* Passed-in reservation objects should already be locked;
1200          * otherwise lockdep will be angered in radeon.
1201          */
1202         if (!resv) {
1203                 locked = dma_resv_trylock(bo->base.resv);
1204                 WARN_ON(!locked);
1205         }
1206
1207         if (likely(!ret))
1208                 ret = ttm_bo_validate(bo, placement, ctx);
1209
1210         if (unlikely(ret)) {
1211                 if (!resv)
1212                         ttm_bo_unreserve(bo);
1213
1214                 ttm_bo_put(bo);
1215                 return ret;
1216         }
1217
1218         ttm_bo_move_to_lru_tail_unlocked(bo);
1219
1220         return ret;
1221 }
1222 EXPORT_SYMBOL(ttm_bo_init_reserved);
1223
1224 int ttm_bo_init(struct ttm_bo_device *bdev,
1225                 struct ttm_buffer_object *bo,
1226                 unsigned long size,
1227                 enum ttm_bo_type type,
1228                 struct ttm_placement *placement,
1229                 uint32_t page_alignment,
1230                 bool interruptible,
1231                 size_t acc_size,
1232                 struct sg_table *sg,
1233                 struct dma_resv *resv,
1234                 void (*destroy) (struct ttm_buffer_object *))
1235 {
1236         struct ttm_operation_ctx ctx = { interruptible, false };
1237         int ret;
1238
1239         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1240                                    page_alignment, &ctx, acc_size,
1241                                    sg, resv, destroy);
1242         if (ret)
1243                 return ret;
1244
1245         if (!resv)
1246                 ttm_bo_unreserve(bo);
1247
1248         return 0;
1249 }
1250 EXPORT_SYMBOL(ttm_bo_init);
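/*
 * Illustrative sketch (not part of this file): creating a one-page,
 * kernel-owned BO in system memory with the default destructor (plain kfree
 * of the kzalloc'ed object).  "bdev" and "sys_placement" are hypothetical;
 * the accounting size comes from ttm_bo_dma_acc_size() below.
 *
 *	struct ttm_buffer_object *bo;
 *	size_t acc_size;
 *	int ret;
 *
 *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return -ENOMEM;
 *
 *	acc_size = ttm_bo_dma_acc_size(bdev, PAGE_SIZE, sizeof(*bo));
 *	ret = ttm_bo_init(bdev, bo, PAGE_SIZE, ttm_bo_type_kernel,
 *			  &sys_placement, 0, false, acc_size,
 *			  NULL, NULL, NULL);
 */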
1251
1252 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1253                            unsigned long bo_size,
1254                            unsigned struct_size)
1255 {
1256         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1257         size_t size = 0;
1258
1259         size += ttm_round_pot(struct_size);
1260         size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1261         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1262         return size;
1263 }
1264 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1265
1266 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1267 {
1268         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
1269
1270         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1271                 pr_err("Illegal memory manager memory type %u\n", mem_type);
1272                 return -EINVAL;
1273         }
1274
1275         if (!man) {
1276                 pr_err("Memory type %u has not been initialized\n", mem_type);
1277                 return 0;
1278         }
1279
1280         return ttm_resource_manager_force_list_clean(bdev, man);
1281 }
1282 EXPORT_SYMBOL(ttm_bo_evict_mm);
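/*
 * Illustrative sketch (not part of this file): drivers commonly evict all of
 * VRAM to system memory while suspending.  "dev" is a hypothetical device
 * pointer used only for the error message.
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *	if (ret)
 *		dev_err(dev, "failed to evict VRAM before suspend\n");
 */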
1283
1284 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1285 {
1286         struct ttm_bo_global *glob =
1287                 container_of(kobj, struct ttm_bo_global, kobj);
1288
1289         __free_page(glob->dummy_read_page);
1290 }
1291
1292 static void ttm_bo_global_release(void)
1293 {
1294         struct ttm_bo_global *glob = &ttm_bo_glob;
1295
1296         mutex_lock(&ttm_global_mutex);
1297         if (--ttm_bo_glob_use_count > 0)
1298                 goto out;
1299
1300         kobject_del(&glob->kobj);
1301         kobject_put(&glob->kobj);
1302         ttm_mem_global_release(&ttm_mem_glob);
1303         memset(glob, 0, sizeof(*glob));
1304 out:
1305         mutex_unlock(&ttm_global_mutex);
1306 }
1307
1308 static int ttm_bo_global_init(void)
1309 {
1310         struct ttm_bo_global *glob = &ttm_bo_glob;
1311         int ret = 0;
1312         unsigned i;
1313
1314         mutex_lock(&ttm_global_mutex);
1315         if (++ttm_bo_glob_use_count > 1)
1316                 goto out;
1317
1318         ret = ttm_mem_global_init(&ttm_mem_glob);
1319         if (ret)
1320                 goto out;
1321
1322         spin_lock_init(&glob->lru_lock);
1323         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1324
1325         if (unlikely(glob->dummy_read_page == NULL)) {
1326                 ret = -ENOMEM;
1327                 goto out;
1328         }
1329
1330         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1331                 INIT_LIST_HEAD(&glob->swap_lru[i]);
1332         INIT_LIST_HEAD(&glob->device_list);
1333         atomic_set(&glob->bo_count, 0);
1334
1335         ret = kobject_init_and_add(
1336                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1337         if (unlikely(ret != 0))
1338                 kobject_put(&glob->kobj);
1339 out:
1340         mutex_unlock(&ttm_global_mutex);
1341         return ret;
1342 }
1343
1344 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1345 {
1346         struct ttm_bo_global *glob = &ttm_bo_glob;
1347         int ret = 0;
1348         unsigned i;
1349         struct ttm_resource_manager *man;
1350
1351         man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1352         ttm_resource_manager_set_used(man, false);
1353         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1354
1355         mutex_lock(&ttm_global_mutex);
1356         list_del(&bdev->device_list);
1357         mutex_unlock(&ttm_global_mutex);
1358
1359         cancel_delayed_work_sync(&bdev->wq);
1360
1361         if (ttm_bo_delayed_delete(bdev, true))
1362                 pr_debug("Delayed destroy list was clean\n");
1363
1364         spin_lock(&glob->lru_lock);
1365         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1366                 if (list_empty(&man->lru[i]))
1367                         pr_debug("Swap list %d was clean\n", i);
1368         spin_unlock(&glob->lru_lock);
1369
1370         if (!ret)
1371                 ttm_bo_global_release();
1372
1373         return ret;
1374 }
1375 EXPORT_SYMBOL(ttm_bo_device_release);
1376
1377 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1378 {
1379         struct ttm_resource_manager *man = &bdev->sysman;
1380
1381         /*
1382          * Initialize the system memory buffer type.
1383          * Other types need to be driver / IOCTL initialized.
1384          */
1385         man->use_tt = true;
1386
1387         ttm_resource_manager_init(man, 0);
1388         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1389         ttm_resource_manager_set_used(man, true);
1390 }
1391
1392 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1393                        struct ttm_bo_driver *driver,
1394                        struct address_space *mapping,
1395                        struct drm_vma_offset_manager *vma_manager,
1396                        bool need_dma32)
1397 {
1398         struct ttm_bo_global *glob = &ttm_bo_glob;
1399         int ret;
1400
1401         if (WARN_ON(vma_manager == NULL))
1402                 return -EINVAL;
1403
1404         ret = ttm_bo_global_init();
1405         if (ret)
1406                 return ret;
1407
1408         bdev->driver = driver;
1409
1410         ttm_bo_init_sysman(bdev);
1411
1412         bdev->vma_manager = vma_manager;
1413         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1414         INIT_LIST_HEAD(&bdev->ddestroy);
1415         bdev->dev_mapping = mapping;
1416         bdev->need_dma32 = need_dma32;
1417         mutex_lock(&ttm_global_mutex);
1418         list_add_tail(&bdev->device_list, &glob->device_list);
1419         mutex_unlock(&ttm_global_mutex);
1420
1421         return 0;
1422 }
1423 EXPORT_SYMBOL(ttm_bo_device_init);
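/*
 * Illustrative sketch (not part of this file): a driver initializes its TTM
 * device once at load time, before creating any BOs.  "my_dev", its embedded
 * drm_device "drm", the "need_dma32" flag and the driver function table
 * "my_bo_driver" are hypothetical.
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev, &my_bo_driver,
 *				 my_dev->drm.anon_inode->i_mapping,
 *				 my_dev->drm.vma_offset_manager,
 *				 my_dev->need_dma32);
 *	if (ret)
 *		return ret;
 */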
1424
1425 /*
1426  * buffer object vm functions.
1427  */
1428
1429 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1430 {
1431         struct ttm_bo_device *bdev = bo->bdev;
1432
1433         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1434         ttm_mem_io_free(bdev, &bo->mem);
1435 }
1436 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1437
1438 int ttm_bo_wait(struct ttm_buffer_object *bo,
1439                 bool interruptible, bool no_wait)
1440 {
1441         long timeout = 15 * HZ;
1442
1443         if (no_wait) {
1444                 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1445                         return 0;
1446                 else
1447                         return -EBUSY;
1448         }
1449
1450         timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1451                                                       interruptible, timeout);
1452         if (timeout < 0)
1453                 return timeout;
1454
1455         if (timeout == 0)
1456                 return -EBUSY;
1457
1458         dma_resv_add_excl_fence(bo->base.resv, NULL);
1459         return 0;
1460 }
1461 EXPORT_SYMBOL(ttm_bo_wait);
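/*
 * Illustrative sketch (not part of this file): waiting for a BO to become
 * idle before the CPU touches its backing pages, with the reservation held
 * around the wait as the callers in this file do.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_wait(bo, true, false);
 *	dma_resv_unlock(bo->base.resv);
 *	if (ret)
 *		return ret;
 */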
1462
1463 /**
1464  * A buffer object shrink method that tries to swap out the first
1465  * buffer object on the bo_global::swap_lru list.
1466  */
1467 int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1468 {
1469         struct ttm_buffer_object *bo;
1470         int ret = -EBUSY;
1471         bool locked;
1472         unsigned i;
1473
1474         spin_lock(&glob->lru_lock);
1475         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1476                 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1477                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1478                                                             NULL))
1479                                 continue;
1480
1481                         if (!ttm_bo_get_unless_zero(bo)) {
1482                                 if (locked)
1483                                         dma_resv_unlock(bo->base.resv);
1484                                 continue;
1485                         }
1486
1487                         ret = 0;
1488                         break;
1489                 }
1490                 if (!ret)
1491                         break;
1492         }
1493
1494         if (ret) {
1495                 spin_unlock(&glob->lru_lock);
1496                 return ret;
1497         }
1498
1499         if (bo->deleted) {
1500                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1501                 ttm_bo_put(bo);
1502                 return ret;
1503         }
1504
1505         ttm_bo_del_from_lru(bo);
1506         spin_unlock(&glob->lru_lock);
1507
1508         /**
1509          * Move to system cached
1510          */
1511
1512         if (bo->mem.mem_type != TTM_PL_SYSTEM) {
1513                 struct ttm_operation_ctx ctx = { false, false };
1514                 struct ttm_resource evict_mem;
1515
1516                 evict_mem = bo->mem;
1517                 evict_mem.mm_node = NULL;
1518                 evict_mem.placement = TTM_PL_MASK_CACHING;
1519                 evict_mem.mem_type = TTM_PL_SYSTEM;
1520
1521                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1522                 if (unlikely(ret != 0))
1523                         goto out;
1524         }
1525
1526         /**
1527          * Make sure BO is idle.
1528          */
1529
1530         ret = ttm_bo_wait(bo, false, false);
1531         if (unlikely(ret != 0))
1532                 goto out;
1533
1534         ttm_bo_unmap_virtual(bo);
1535
1536         /**
1537          * Swap out. Buffer will be swapped in again as soon as
1538          * anyone tries to access a ttm page.
1539          */
1540
1541         if (bo->bdev->driver->swap_notify)
1542                 bo->bdev->driver->swap_notify(bo);
1543
1544         ret = ttm_tt_swapout(bo->bdev, bo->ttm);
1545 out:
1546
1547         /**
1548          *
1549          * Unreserve without putting on LRU to avoid swapping out an
1550          * already swapped buffer.
1551          */
1552         if (locked)
1553                 dma_resv_unlock(bo->base.resv);
1554         ttm_bo_put(bo);
1555         return ret;
1556 }
1557 EXPORT_SYMBOL(ttm_bo_swapout);
1558
1559 void ttm_bo_swapout_all(void)
1560 {
1561         struct ttm_operation_ctx ctx = {
1562                 .interruptible = false,
1563                 .no_wait_gpu = false
1564         };
1565
1566         while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
1567 }
1568 EXPORT_SYMBOL(ttm_bo_swapout_all);
1569
1570 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1571 {
1572         if (bo->ttm == NULL)
1573                 return;
1574
1575         ttm_tt_destroy(bo->bdev, bo->ttm);
1576         bo->ttm = NULL;
1577 }
1578
1579 int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
1580 {
1581         return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
1582 }
1583
1584 void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
1585 {
1586         bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
1587 }