/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16

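/*
 * ttm_bo_vm_fault_idle - wait for a pipelined buffer move to finish.
 *
 * Waits for bo->moving and clears it once signaled. When the fault allows
 * retries and waiting is permitted, the wait is done after dropping
 * mmap_sem, the bo reservation is released, and VM_FAULT_RETRY is returned
 * so the fault can be restarted. Returns 0 once the buffer is idle,
 * otherwise a VM_FAULT_* code.
 */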
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
{
        vm_fault_t ret = 0;
        int err = 0;

        if (likely(!bo->moving))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        if (dma_fence_is_signaled(bo->moving))
                goto out_clear;

        /*
         * If possible, avoid waiting for GPU with mmap_sem
         * held.
         */
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }

        /*
         * Ordinary wait.
         */
        err = dma_fence_wait(bo->moving, true);
        if (unlikely(err != 0)) {
                ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }

out_clear:
        dma_fence_put(bo->moving);
        bo->moving = NULL;

out_unlock:
        return ret;
}

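/*
 * ttm_bo_io_mem_pfn - look up the pfn backing one page of an io-mapped bo.
 *
 * Uses the driver's io_mem_pfn() callback when available, otherwise derives
 * the pfn linearly from the bus base and offset of the buffer.
 */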
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                                       unsigned long page_offset)
{
        struct ttm_bo_device *bdev = bo->bdev;

        if (bdev->driver->io_mem_pfn)
                return bdev->driver->io_mem_pfn(bo, page_offset);

        return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
                + page_offset;
}

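/*
 * ttm_bo_vm_fault - .fault handler for TTM buffer object mappings.
 *
 * Reserves the bo with a trylock (retrying the fault if that is contended),
 * waits for pending moves, reserves io space or populates the TTM pages as
 * needed, and then inserts up to TTM_BO_VM_NUM_PREFAULT PTEs starting at
 * the faulting address.
 */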
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int err;
        int i;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
        struct vm_area_struct cvma;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
                                up_read(&vmf->vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                                ttm_bo_put(bo);
                        }

                        return VM_FAULT_RETRY;
                }

                /*
                 * If we wanted to change the locking order to
                 * mmap_sem -> bo::reserve, we would use a blocking reserve
                 * here instead of retrying the fault...
                 */
                return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                struct dma_fence *moving = dma_fence_get(bo->moving);

                err = bdev->driver->fault_reserve_notify(bo);
                switch (err) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        ret = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        ret = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                if (bo->moving != moving) {
                        spin_lock(&bdev->glob->lru_lock);
                        ttm_bo_move_to_lru_tail(bo, NULL);
                        spin_unlock(&bdev->glob->lru_lock);
                }
                dma_fence_put(moving);
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vmf);
        if (unlikely(ret != 0)) {
                if (ret == VM_FAULT_RETRY &&
                    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                        /* The BO has already been unreserved. */
                        return ret;
                }

                goto out_unlock;
        }

        err = ttm_mem_io_lock(man, true);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        err = ttm_mem_io_reserve_vm(bo);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);

        if (unlikely(page_offset >= bo->num_pages)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Make a local vma copy to modify the page_prot member
         * and vm_flags if necessary. The real vma is stable here (writers
         * take mmap_sem in write mode) but must not be modified under the
         * fault path's read lock.
         */
        cvma = *vma;
        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

        if (bo->mem.bus.is_iomem) {
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false,
                        .flags = TTM_OPT_FLAG_FORCE_ALLOC
                };

                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);

                /* Allocate all pages at once, the most common usage */
                if (ttm_tt_populate(ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only report an error
         * on the first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem) {
                        /* Iomem should not be marked encrypted */
                        cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
                        pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                } else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                ret = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        page->index = drm_vma_node_start(&bo->base.vma_node) +
                                page_offset;
                        pfn = page_to_pfn(page);
                }

                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);

                /*
                 * Either somebody raced us to this PTE, we prefaulted into
                 * an already populated PTE, or the insertion failed.
                 */
                if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
                        break;
                else if (unlikely(ret & VM_FAULT_ERROR))
                        goto out_io_unlock;

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
        ret = VM_FAULT_NOPAGE;
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

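/* Take an extra bo reference for each new mapping sharing vm_private_data. */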
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        ttm_bo_get(bo);
}

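/* Drop the mapping's bo reference when the vma is torn down. */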
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_put(bo);
        vma->vm_private_data = NULL;
}

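/*
 * ttm_bo_vm_access_kmap - copy data between a kernel buffer and bo memory
 * using temporary kmaps, one page at a time. Returns the number of bytes
 * copied or a negative error code.
 */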
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 uint8_t *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;
        int ret;

        /* Copy a page at a time, that way no extra virtual address
         * mapping is needed.
         */
        offset -= page << PAGE_SHIFT;
        do {
                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
                struct ttm_bo_kmap_obj map;
                void *ptr;
                bool is_iomem;

                ret = ttm_bo_kmap(bo, page, 1, &map);
                if (ret)
                        return ret;

                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
                WARN_ON_ONCE(is_iomem);
                if (write)
                        memcpy(ptr, buf, bytes);
                else
                        memcpy(buf, ptr, bytes);
                ttm_bo_kunmap(&map);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);

        return len;
}

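/*
 * ttm_bo_vm_access - .access handler, used e.g. by ptrace and /proc/<pid>/mem.
 *
 * Bounds-checks the request, reserves the bo, swaps system pages back in if
 * needed and copies via kmap, or defers to the driver's access_memory()
 * callback for other placements such as VRAM.
 */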
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        unsigned long offset = addr - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        int ret;

        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
                return -EIO;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        switch (bo->mem.mem_type) {
        case TTM_PL_SYSTEM:
                if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                        ret = ttm_tt_swapin(bo->ttm);
                        if (unlikely(ret != 0))
                                goto out_unreserve;
                }
                /* fall through */
        case TTM_PL_TT:
                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
                break;
        default:
                if (bo->bdev->driver->access_memory)
                        ret = bo->bdev->driver->access_memory(
                                bo, offset, buf, len, write);
                else
                        ret = -EIO;
        }

out_unreserve:
        ttm_bo_unreserve(bo);

        return ret;
}

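/* vm_operations shared by all TTM buffer object mappings. */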
static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

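/*
 * ttm_bo_vm_lookup - translate a mmap offset range into a buffer object.
 *
 * Looks up the drm_vma_offset node covering [offset, offset + pages) and
 * returns the embedding bo with a reference held, or NULL if no node covers
 * the range or the object is already on its way to destruction.
 */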
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
                                                  unsigned long offset,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct ttm_buffer_object *bo = NULL;

        drm_vma_offset_lock_lookup(&bdev->vma_manager);

        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object,
                                  base.vma_node);
                bo = ttm_bo_get_unless_zero(bo);
        }

        drm_vma_offset_unlock_lookup(&bdev->vma_manager);

        if (!bo)
                pr_err("Could not find buffer object to map\n");

        return bo;
}

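/*
 * ttm_bo_mmap - mmap helper intended to be called from a driver's
 * fops->mmap. Looks up the bo backing the requested offset, lets the driver
 * verify that @filp may access it, and wires up ttm_bo_vm_ops. The bo
 * reference taken by the lookup is transferred to vma->vm_private_data.
 */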
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
                return -EINVAL;

        bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
        if (unlikely(!bo))
                return -EINVAL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */
        vma->vm_private_data = bo;

        /*
         * We'd like to use VM_PFNMAP on shared mappings, where
         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
         * bad for performance. Until that has been sorted out, use
         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
         */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
out_unref:
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

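/*
 * ttm_fbdev_mmap - set up a vma to map a bo directly, for fbdev emulation.
 * Unlike ttm_bo_mmap(), the caller already holds the bo, only offset zero
 * is accepted and no verify_access() check is made.
 */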
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        ttm_bo_get(bo);

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = bo;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);