/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/*
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM memory subsystem.\n");
                return ret;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
                return ret;
        }

        return 0;
}

/*
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
}

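/*
 * TTM buffer object destroy callback: releases the GEM object backing
 * the vbox_bo and frees the embedding structure itself.
 */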
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &vbox_bo_ttm_destroy;
}

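/*
 * Describe the memory types we support to TTM: cached system memory, and
 * fixed, write-combined VRAM.
 */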
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}

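/* On eviction, ask TTM to move vbox buffer objects into system memory. */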
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
        *pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}

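/*
 * Fill in the bus address information TTM needs to map a memory region:
 * system memory needs no setup, VRAM is I/O memory at an offset into
 * PCI BAR 0.
 */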
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}

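/* BO moves are done entirely by the CPU via ttm_bo_move_memcpy(). */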
static int vbox_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};

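/*
 * Allocate and initialise a plain ttm_tt; its pages are provided by the
 * TTM page pool via the populate/unpopulate callbacks below.
 */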
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

        tt->func = &vbox_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

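/* TTM driver callbacks for the vboxvideo device. */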
struct ttm_bo_driver vbox_bo_driver = {
        .ttm_tt_create = vbox_ttm_tt_create,
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
        .init_mem_type = vbox_bo_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vbox_bo_evict_flags,
        .move = vbox_bo_move,
        .verify_access = vbox_bo_verify_access,
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};

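/*
 * Set up the TTM memory manager: register with the global TTM state,
 * initialise the BO device, create a VRAM range manager sized to the
 * available VRAM, and mark the framebuffer BAR write-combined.
 */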
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;

        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&vbox->ttm.bdev,
                                 vbox->ttm.bo_global_ref.ref.object,
                                 &vbox_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET, true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver: %d\n", ret);
                goto err_ttm_global_release;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                goto err_device_release;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));
#endif
        return 0;

err_device_release:
        ttm_bo_device_release(&vbox->ttm.bdev);
err_ttm_global_release:
        vbox_ttm_global_release(vbox);
        return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(vbox->dev->pdev, 0),
                     pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
        ttm_bo_device_release(&vbox->ttm.bdev);
        vbox_ttm_global_release(vbox);
}

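/*
 * Build the ttm_placement for a BO from a mask of TTM_PL_FLAG_* domains.
 * Falls back to system memory when no domain is requested; fpfn/lpfn of
 * zero mean any offset within each domain is acceptable.
 */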
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
        unsigned int i;
        u32 c = 0;

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (domain & TTM_PL_FLAG_VRAM)
                bo->placements[c++].flags =
                    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                bo->placements[c++].flags =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                bo->placements[c++].flags =
                    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
}

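/*
 * Create a GEM-backed TTM buffer object that may be placed in either
 * VRAM or system memory.
 */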
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
        size_t acc_size;
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret)
                goto err_free_vboxbo;

        vboxbo->bo.bdev = &vbox->ttm.bdev;

        vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size,
                          NULL, NULL, vbox_bo_ttm_destroy);
        if (ret) {
                /*
                 * On failure ttm_bo_init() has already destroyed the BO
                 * through vbox_bo_ttm_destroy(), which frees vboxbo, so
                 * freeing it again here would be a double free.
                 */
                return ret;
        }

        *pvboxbo = vboxbo;

        return 0;

err_free_vboxbo:
        kfree(vboxbo);
        return ret;
}

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
        return bo->bo.offset;
}

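/*
 * Pin a BO into the requested placement domain, marking it non-evictable,
 * and return its current offset in gpu_addr if requested.  Nested pins
 * only increment the pin count.
 */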
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, pl_flag);

        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

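/* Drop one pin reference; the BO becomes evictable again at zero. */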
int vbox_bo_unpin(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin of unpinned buffer %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;

        return 0;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin of unpinned buffer %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}

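/*
 * mmap entry point for the DRM device node: offsets at or above
 * DRM_FILE_PAGE_OFFSET belong to TTM buffer objects, so hand those
 * mappings to ttm_bo_mmap().
 */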
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

        return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}