drivers/gpu/drm/virtio/virtgpu_ttm.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"

#include <linux/delay.h>

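/*
 * Start of the fake-offset range handed out for mmap()ing TTM buffer
 * objects: the first object lands at the 4 GiB mark of the DRM file's
 * address space, clear of offsets used by anything else on the file
 * (the same convention other TTM drivers of this era use).
 */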
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

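/*
 * The ttm_bo_device is embedded in struct virtio_gpu_mman, which itself
 * is embedded in struct virtio_gpu_device, so two container_of() steps
 * recover the driver instance from the bdev pointer TTM hands us.
 */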
static struct virtio_gpu_device *
virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
	struct virtio_gpu_mman *mman;
	struct virtio_gpu_device *vgdev;

	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
	vgdev = container_of(mman, struct virtio_gpu_device, mman);
	return vgdev;
}

static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

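/*
 * TTM's memory-accounting and BO state are system-wide singletons shared
 * by all TTM drivers; drm_global_item_ref()/drm_global_item_unref() take
 * and drop references, constructing the objects on first use.
 */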
static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
{
	struct drm_global_reference *global_ref;
	int r;

	vgdev->mman.mem_global_referenced = false;
	global_ref = &vgdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &virtio_gpu_ttm_mem_global_init;
	global_ref->release = &virtio_gpu_ttm_mem_global_release;

	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	vgdev->mman.bo_global_ref.mem_glob =
		vgdev->mman.mem_global_ref.object;
	global_ref = &vgdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&vgdev->mman.mem_global_ref);
		return r;
	}

	vgdev->mman.mem_global_referenced = true;
	return 0;
}

static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
{
	if (vgdev->mman.mem_global_referenced) {
		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&vgdev->mman.mem_global_ref);
		vgdev->mman.mem_global_referenced = false;
	}
}

#if 0
/*
 * Hmm, seems to not do anything useful.  Leftover debug hack?
 * Something like printing pagefaults to kernel log?
 */
static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct virtio_gpu_device *vgdev;
	int r;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	vgdev = virtio_gpu_get_vgdev(bo->bdev);
	r = ttm_vm_ops->fault(vmf);
	return r;
}
#endif

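/*
 * fops->mmap handler: everything is delegated to ttm_bo_mmap(), which
 * looks up the buffer object by its fake offset.  Illustrative userspace
 * flow (sketch only; fd, bo_handle and size are hypothetical):
 *
 *	struct drm_virtgpu_map map = { .handle = bo_handle };
 *
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */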
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct virtio_gpu_device *vgdev;
	int r;

	file_priv = filp->private_data;
	vgdev = file_priv->minor->dev->dev_private;
	if (vgdev == NULL) {
		DRM_ERROR("filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
#if 0
	if (unlikely(r != 0))
		return r;
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
	}
	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
	return 0;
#else
	return r;
#endif
}

static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
					uint32_t flags)
{
	return 0;
}

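/*
 * Dummy memory manager for the TTM_PL_TT domain: virtio-gpu has no real
 * aperture to carve ranges out of, so allocation only needs to mark the
 * node as in use.  Any non-NULL mm_node token will do; the (void *)1
 * below is never dereferenced.
 */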
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	mem->mm_node = (void *)1;
	return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	mem->mm_node = NULL;
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
}

static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};

static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
{
	struct virtio_gpu_device *vgdev;

	vgdev = virtio_gpu_get_vgdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &virtio_gpu_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

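/*
 * On eviction everything may be placed back in plain system memory, so a
 * single static placement serves as both the normal and the busy list.
 */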
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn  = 0,
		.lpfn  = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
	};

	placement->placement = &placements;
	placement->busy_placement = &placements;
	placement->num_placement = 1;
	placement->num_busy_placement = 1;
}

static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	return 0;
}

static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
					 struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		/* system memory */
		return 0;
	default:
		return -EINVAL;
	}
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct virtio_gpu_device	*vgdev;
	u64				offset;
};

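/*
 * bind/unbind are no-ops here: attaching and detaching the backing store
 * happens via virtio commands from the move_notify hook further down,
 * where the hardware resource handle is available.
 */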
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
				       struct ttm_mem_reg *bo_mem)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages)
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);

	/* Not implemented */
	return 0;
}

static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return 0;
}

static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_backend_func = {
	.bind = &virtio_gpu_ttm_backend_bind,
	.unbind = &virtio_gpu_ttm_backend_unbind,
	.destroy = &virtio_gpu_ttm_backend_destroy,
};

static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	return ttm_pool_populate(ttm);
}

static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
					       unsigned long size,
					       uint32_t page_flags,
					       struct page *dummy_read_page)
{
	struct virtio_gpu_device *vgdev;
	struct virtio_gpu_ttm_tt *gtt;

	vgdev = virtio_gpu_get_vgdev(bdev);
	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
	gtt->vgdev = vgdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

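/*
 * A "null" move: the backing pages are identical in every placement, so
 * moving a BO only transfers the ttm_mem_reg bookkeeping and never
 * copies data.
 */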
static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
			      bool evict, bool interruptible,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}

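/*
 * Keep the host's view of the backing store in sync with placement
 * changes: a move out to system memory (or a delete, new_mem == NULL)
 * invalidates the host-side backing, a move into TTM_PL_TT re-attaches
 * the pages to the host resource.
 */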
static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
				      bool evict,
				      struct ttm_mem_reg *new_mem)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
		if (bo->hw_res_handle)
			virtio_gpu_cmd_resource_inval_backing(vgdev,
							      bo->hw_res_handle);
	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
		if (bo->hw_res_handle) {
			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
						 NULL);
		}
	}
}

static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
}

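/* TTM driver vtable wiring up all of the hooks above. */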
static struct ttm_bo_driver virtio_gpu_bo_driver = {
	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
	.ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
	.invalidate_caches = &virtio_gpu_invalidate_caches,
	.init_mem_type = &virtio_gpu_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &virtio_gpu_evict_flags,
	.move = &virtio_gpu_bo_move,
	.verify_access = &virtio_gpu_verify_access,
	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
	.move_notify = &virtio_gpu_bo_move_notify,
	.swap_notify = &virtio_gpu_bo_swap_notify,
};

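/*
 * Init/teardown entry points, called from the driver load/unload paths
 * (see virtgpu_kms.c).
 */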
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
	int r;

	r = virtio_gpu_ttm_global_init(vgdev);
	if (r)
		return r;
	/* No other users of the address space, so set it to 0. */
	r = ttm_bo_device_init(&vgdev->mman.bdev,
			       vgdev->mman.bo_global_ref.ref.object,
			       &virtio_gpu_bo_driver,
			       vgdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET, 0);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
		goto err_dev_init;
	}

	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		goto err_mm_init;
	}
	return 0;

err_mm_init:
	ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
	virtio_gpu_ttm_global_fini(vgdev);
	return r;
}

void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
	ttm_bo_device_release(&vgdev->mman.bdev);
	virtio_gpu_ttm_global_fini(vgdev);
	DRM_INFO("virtio_gpu: ttm finalized\n");
}