/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

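/*
 * Walk back from the embedded ttm_bo_device to the radeon_device that
 * contains it; TTM callbacks only hand us the bdev pointer.
 */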
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
        struct ttm_global_reference *global_ref;
        int r;

        rdev->mman.mem_global_referenced = false;
        global_ref = &rdev->mman.mem_global_ref;
        global_ref->global_type = TTM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &radeon_ttm_mem_global_init;
        global_ref->release = &radeon_ttm_mem_global_release;
        r = ttm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        rdev->mman.bo_global_ref.mem_glob =
                rdev->mman.mem_global_ref.object;
        global_ref = &rdev->mman.bo_global_ref.ref;
        global_ref->global_type = TTM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = ttm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                ttm_global_item_unref(&rdev->mman.mem_global_ref);
                return r;
        }

        rdev->mman.mem_global_referenced = true;
        return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
        if (rdev->mman.mem_global_referenced) {
                ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
                ttm_global_item_unref(&rdev->mman.mem_global_ref);
                rdev->mman.mem_global_referenced = false;
        }
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

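/*
 * Pick the TTM backend for GTT pages: the generic AGP backend when the
 * GPU sits behind an AGP bridge, otherwise radeon's own GART backend.
 */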
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
        } else
#endif
        {
                return radeon_ttm_backend_create(rdev);
        }
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

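/*
 * Describe each memory pool to TTM: where it lives in the GPU address
 * space, whether it is CPU-mappable, and which caching modes it allows.
 */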
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->gpu_offset = rdev->mc.gtt_location;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        man->io_offset = rdev->mc.agp_base;
                        man->io_size = rdev->mc.gtt_size;
                        man->io_addr = NULL;
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                                             TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else
#endif
                {
                        man->io_offset = 0;
                        man->io_size = 0;
                        man->io_addr = NULL;
                }
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = rdev->mc.vram_location;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->io_addr = NULL;
                man->io_offset = rdev->mc.aper_base;
                man->io_size = rdev->mc.aper_size;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

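/*
 * Choose where a buffer goes when it is evicted: VRAM contents are
 * pushed to GTT (still GPU-visible), anything else falls back to CPU
 * (system) memory.  "placements" must be static because the
 * ttm_placement handed back to TTM keeps pointing at it after return.
 */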
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        struct radeon_bo *rbo;
        static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->fpfn = 0;
                placement->lpfn = 0;
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        *placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

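/*
 * Copy a buffer between placements with the GPU's blit engine and fence
 * the copy so TTM can tell when the old pages are safe to release.
 */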
static int radeon_move_blit(struct ttm_buffer_object *bo,
                            bool evict, int no_wait,
                            struct ttm_mem_reg *new_mem,
                            struct ttm_mem_reg *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        r = radeon_fence_create(rdev, &fence);
        if (unlikely(r)) {
                return r;
        }
        old_start = old_mem->mm_node->start << PAGE_SHIFT;
        new_start = new_mem->mm_node->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->cp.ready) {
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
        r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

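/*
 * VRAM -> system move in two hops: first blit into a temporary GTT
 * placement the GPU can reach, then let TTM move the now GTT-backed
 * pages to system memory.
 */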
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        u32 placements;
        struct ttm_placement placement;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;

                spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return r;
}

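/*
 * System -> VRAM is the mirror image: bind the pages into a temporary
 * GTT placement first, then blit from GTT into the final VRAM location.
 */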
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        u32 placements;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;

                spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return r;
}

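/*
 * Top-level move dispatch: no-op moves are resolved in place, GPU blits
 * are used whenever the CP is up and the ASIC has a copy hook, and
 * everything else falls back to a CPU memcpy through TTM.
 */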
static int radeon_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible, bool no_wait,
                          struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if (!rdev->cp.ready || rdev->asic->copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
                                            no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
                                            no_wait, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return r;
}

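/*
 * TTM sync-object hooks: TTM only sees opaque void pointers, so these
 * thin wrappers cast back to struct radeon_fence and forward to the
 * fence API.
 */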
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
{
        return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
        radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
        return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
        .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
        .sync_obj_signaled = &radeon_sync_obj_signaled,
        .sync_obj_wait = &radeon_sync_obj_wait,
        .sync_obj_flush = &radeon_sync_obj_flush,
        .sync_obj_unref = &radeon_sync_obj_unref,
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

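/*
 * Bring up the whole memory manager: global TTM state, the BO device,
 * the VRAM and GTT pools, the pinned VGA memory buffer and debugfs.
 */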
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_ttm_global_init(rdev);
        if (r) {
                return r;
        }
        /* No other user of the address space, so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
                               &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
                               rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                                rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
                                RADEON_GEM_DOMAIN_VRAM,
                                &rdev->stollen_vga_memory);
        if (r) {
                return r;
        }
        r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stollen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                                rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }

        r = radeon_ttm_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

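/*
 * Tear down in reverse order of init: unpin and free the VGA memory
 * buffer, drain both pools, release the BO device, then drop the GART
 * and the global TTM references.
 */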
void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (rdev->stollen_vga_memory) {
                r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stollen_vga_memory);
                        radeon_bo_unreserve(rdev->stollen_vga_memory);
                }
                radeon_bo_unref(&rdev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        radeon_ttm_global_fini(rdev);
        DRM_INFO("radeon: ttm finalized\n");
}

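/*
 * mmap plumbing: on the first mmap we copy TTM's vm_ops, substitute our
 * own fault handler, and keep the original table so faults can still be
 * forwarded to TTM once the BO pointer has been checked.
 */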
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        int r;

        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
        r = ttm_vm_ops->fault(vma, vmf);
        return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct radeon_device *rdev;
        int r;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
                return drm_mmap(filp, vma);
        }

        file_priv = (struct drm_file *)filp->private_data;
        rdev = file_priv->minor->dev->dev_private;
        if (rdev == NULL) {
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
        if (unlikely(r != 0)) {
                return r;
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
        struct ttm_backend              backend;
        struct radeon_device            *rdev;
        unsigned long                   num_pages;
        struct page                     **pages;
        struct page                     *dummy_read_page;
        bool                            populated;
        bool                            bound;
        unsigned                        offset;
};

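/*
 * populate() hands the backend the array of system pages that back the
 * object; bind()/unbind() map and unmap those pages in the GART.
 */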
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
                                       unsigned long num_pages,
                                       struct page **pages,
                                       struct page *dummy_read_page)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->pages = pages;
        gtt->num_pages = num_pages;
        gtt->dummy_read_page = dummy_read_page;
        gtt->populated = true;
        return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->pages = NULL;
        gtt->num_pages = 0;
        gtt->dummy_read_page = NULL;
        gtt->populated = false;
        gtt->bound = false;
}

static int radeon_ttm_backend_bind(struct ttm_backend *backend,
                                   struct ttm_mem_reg *bo_mem)
{
        struct radeon_ttm_backend *gtt;
        int r;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     gtt->num_pages, bo_mem, backend);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
                             gtt->num_pages, gtt->pages);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          gtt->num_pages, gtt->offset);
                return r;
        }
        gtt->bound = true;
        return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
        gtt->bound = false;
        return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        if (gtt->bound) {
                radeon_ttm_backend_unbind(backend);
        }
        kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
        .populate = &radeon_ttm_backend_populate,
        .clear = &radeon_ttm_backend_clear,
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};

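/*
 * Allocate and initialise an instance of radeon's own GART backend for
 * the non-AGP path; returns NULL if the allocation fails.
 */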
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
        struct radeon_ttm_backend *gtt;

        gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->backend.bdev = &rdev->mman.bdev;
        gtt->backend.flags = 0;
        gtt->backend.func = &radeon_backend_func;
        gtt->rdev = rdev;
        gtt->pages = NULL;
        gtt->num_pages = 0;
        gtt->dummy_read_page = NULL;
        gtt->populated = false;
        gtt->bound = false;
        return &gtt->backend;
}

#define RADEON_DEBUGFS_MEM_TYPES 2

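/*
 * Debugfs support: dump the drm_mm allocator state of the VRAM and GTT
 * managers, taking the BO global's LRU lock around the dump.
 */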
#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ret;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;

        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
        return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
        static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
        unsigned i;

        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
                else
                        sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
                radeon_mem_types_list[i].name = radeon_mem_types_names[i];
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
                else
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
        }
        return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
                                        RADEON_DEBUGFS_MEM_TYPES);
#endif
        return 0;
}