/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool interruptible,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
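
/*
 * Editorial usage sketch, not part of the original file: a driver that
 * cannot handle a direct VRAM <-> system move usually bounces the buffer
 * through a GTT placement first and lets ttm_bo_move_ttm() do the final
 * unbind/rebind. All my_* names below are hypothetical.
 */
#if 0
static int my_move_vram_to_system(struct ttm_buffer_object *bo,
                                  bool evict, bool interruptible,
                                  bool no_wait_gpu,
                                  struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int ret;

        /* Ask for a GPU-mappable (TT) intermediate placement. */
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                               interruptible, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out_cleanup;

        /* Hypothetical hardware blit from VRAM into the TT pages. */
        ret = my_copy_blit(bo, evict, no_wait_gpu, &tmp_mem, old_mem);
        if (ret)
                goto out_cleanup;

        /* Finish the move with the CPU-side helper above. */
        ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
                              new_mem);
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}
#endif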

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
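
/*
 * Editorial sketch, not part of the original file: the general shape of
 * a driver's io_mem_reserve() callback that the retry loop above calls.
 * Returning -EAGAIN signals that aperture space is exhausted, so
 * ttm_mem_io_evict() can unmap an idle buffer and the reservation is
 * retried. my_aperture_fits() and my_aperture_base are hypothetical.
 */
#if 0
static int my_io_mem_reserve(struct ttm_bo_device *bdev,
                             struct ttm_mem_reg *mem)
{
        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory is kmapped, never ioremapped. */
                return 0;
        case TTM_PL_VRAM:
                if (!my_aperture_fits(mem))
                        return -EAGAIN;
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = my_aperture_base;
                mem->bus.is_iomem = true;
                return 0;
        default:
                return -EINVAL;
        }
}
#endif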

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
                       bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
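
/*
 * Editorial sketch, not part of the original file: drivers typically use
 * ttm_bo_move_memcpy() as the last-resort fallback in their
 * ttm_bo_driver::move callback. my_try_accel_move() is hypothetical.
 */
#if 0
static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
                      bool interruptible, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        int ret;

        ret = my_try_accel_move(bo, evict, no_wait_gpu, new_mem);
        if (ret)
                /* No hardware copy path; fall back to a CPU memcpy. */
                ret = ttm_bo_move_memcpy(bo, evict, interruptible,
                                         no_wait_gpu, new_mem);
        return ret;
}
#endif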

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->moving = NULL;
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
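
/*
 * Editorial sketch, not part of the original file: besides the kernel
 * mappings below, ttm_io_prot() is the helper a fault handler would use
 * to derive the protection for a userspace mapping from the buffer's
 * caching flags. my_vma_page_prot() is hypothetical.
 */
#if 0
static pgprot_t my_vma_page_prot(struct ttm_buffer_object *bo,
                                 struct vm_area_struct *vma)
{
        return ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
}
#endif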

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
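
/*
 * Editorial usage sketch, not part of the original file: CPU access to a
 * (reserved) buffer object through the kmap API above.
 * ttm_kmap_obj_virtual() comes from ttm_bo_api.h; my_fill_bo() is
 * hypothetical.
 */
#if 0
static int my_fill_bo(struct ttm_buffer_object *bo, const void *data,
                      size_t size)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (ret)
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memcpy_toio((void __iomem *)virtual, data, size);
        else
                memcpy(virtual, data, size);

        ttm_bo_kunmap(&map);
        return 0;
}
#endif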

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        reservation_object_add_excl_fence(bo->resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
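
/*
 * Editorial sketch, not part of the original file: an accelerated move
 * emits a GPU copy, then hands the resulting fence to
 * ttm_bo_move_accel_cleanup(), which either waits (eviction) or parks
 * the old backing store on a ghost object until the copy signals.
 * my_emit_copy() is hypothetical.
 */
#if 0
static int my_accel_move(struct ttm_buffer_object *bo, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct fence *fence;
        int ret;

        fence = my_emit_copy(bo, &bo->mem, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
        fence_put(fence);
        return ret;
}
#endif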

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *old_mem = &bo->mem;

        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

        int ret;

        reservation_object_add_excl_fence(bo->resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);

        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

                /**
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation
                 */

                spin_lock(&from->move_lock);
                if (!from->move || fence_is_later(fence, from->move)) {
                        fence_put(from->move);
                        from->move = fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

        } else {
                /**
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);