drivers/infiniband/hw/usnic/usnic_uiom.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

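/*
 * Number of page_list scatterlist entries that fit in a single page
 * together with the usnic_uiom_chunk header.
 */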
#define USNIC_UIOM_PAGE_CHUNK                                           \
        ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))     /\
        ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -      \
        (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

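/*
 * IOMMU fault handler: log the faulting device and address.  The fault is
 * not resolved here, so -ENOSYS is returned.
 */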
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
                                struct device *dev,
                                unsigned long iova, int flags,
                                void *token)
{
        usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
                dev_name(dev),
                domain, iova, flags);
        return -ENOSYS;
}

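/*
 * Unpin the pages referenced by a chunk list, optionally marking them
 * dirty, and free the chunks themselves.
 */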
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
        struct usnic_uiom_chunk *chunk, *tmp;
        struct page *page;
        struct scatterlist *sg;
        int i;
        dma_addr_t pa;

        list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
                for_each_sg(chunk->page_list, sg, chunk->nents, i) {
                        page = sg_page(sg);
                        pa = sg_phys(sg);
                        if (!PageDirty(page) && dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
                        usnic_dbg("pa: %pa\n", &pa);
                }
                kfree(chunk);
        }
}

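/*
 * Pin the user pages backing [addr, addr + size), charging them against the
 * caller's RLIMIT_MEMLOCK, and record them as scatterlist chunks on
 * uiomr->chunk_list.  On success a reference to the owning mm is taken.
 */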
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
                                int dmasync, struct usnic_uiom_reg *uiomr)
{
        struct list_head *chunk_list = &uiomr->chunk_list;
        struct page **page_list;
        struct scatterlist *sg;
        struct usnic_uiom_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
        unsigned long cur_base;
        unsigned long npages;
        int ret;
        int off;
        int i;
        int flags;
        dma_addr_t pa;
        unsigned int gup_flags;
        struct mm_struct *mm;

        /*
         * If the combination of the addr and size requested for this memory
         * region causes an integer overflow, return an error.
         */
        if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
                return -EINVAL;

        if (!size)
                return -EINVAL;

        if (!can_do_mlock())
                return -EPERM;

        INIT_LIST_HEAD(chunk_list);

        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

        uiomr->owning_mm = mm = current->mm;
        down_read(&mm->mmap_sem);

        locked = atomic64_add_return(npages, &current->mm->pinned_vm);
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -ENOMEM;
                goto out;
        }

        flags = IOMMU_READ | IOMMU_CACHE;
        flags |= (writable) ? IOMMU_WRITE : 0;
        gup_flags = FOLL_WRITE;
        gup_flags |= (writable) ? 0 : FOLL_FORCE;
        cur_base = addr & PAGE_MASK;
        ret = 0;

        while (npages) {
                ret = get_user_pages(cur_base,
                                     min_t(unsigned long, npages,
                                     PAGE_SIZE / sizeof(struct page *)),
                                     gup_flags | FOLL_LONGTERM,
                                     page_list, NULL);

                if (ret < 0)
                        goto out;

                npages -= ret;
                off = 0;

                while (ret) {
                        chunk = kmalloc(struct_size(chunk, page_list,
                                        min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
                                        GFP_KERNEL);
                        if (!chunk) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
                        sg_init_table(chunk->page_list, chunk->nents);
                        for_each_sg(chunk->page_list, sg, chunk->nents, i) {
                                sg_set_page(sg, page_list[i + off],
                                                PAGE_SIZE, 0);
                                pa = sg_phys(sg);
                                usnic_dbg("va: 0x%lx pa: %pa\n",
                                                cur_base + i*PAGE_SIZE, &pa);
                        }
                        cur_base += chunk->nents * PAGE_SIZE;
                        ret -= chunk->nents;
                        off += chunk->nents;
                        list_add_tail(&chunk->list, chunk_list);
                }

                ret = 0;
        }

out:
        if (ret < 0) {
                usnic_uiom_put_pages(chunk_list, 0);
                atomic64_sub(npages, &current->mm->pinned_vm);
        } else
                mmgrab(uiomr->owning_mm);

        up_read(&mm->mmap_sem);
        free_page((unsigned long) page_list);
        return ret;
}

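/*
 * Unmap every interval in the list from the PD's IOMMU domain, one page
 * at a time.
 */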
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
                                                struct usnic_uiom_pd *pd)
{
        struct usnic_uiom_interval_node *interval, *tmp;
        long unsigned va, size;

        list_for_each_entry_safe(interval, tmp, intervals, link) {
                va = interval->start << PAGE_SHIFT;
                size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
                while (size > 0) {
                        /* Workaround for RH 970401 */
                        usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
                        iommu_unmap(pd->domain, va, PAGE_SIZE);
                        va += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
}

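/*
 * Tear down a registration: remove its intervals from the PD's interval
 * tree, unmap them from the IOMMU, and unpin the pages (dirtying them
 * only if requested and the mapping was writable).
 */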
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
                                        struct usnic_uiom_reg *uiomr,
                                        int dirty)
{
        int npages;
        unsigned long vpn_start, vpn_last;
        struct usnic_uiom_interval_node *interval, *tmp;
        int writable = 0;
        LIST_HEAD(rm_intervals);

        npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
        vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
        vpn_last = vpn_start + npages - 1;

        spin_lock(&pd->lock);
        usnic_uiom_remove_interval(&pd->root, vpn_start,
                                        vpn_last, &rm_intervals);
        usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

        list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
                if (interval->flags & IOMMU_WRITE)
                        writable = 1;
                list_del(&interval->link);
                kfree(interval);
        }

        usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
        spin_unlock(&pd->lock);
}

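/*
 * IOMMU-map each interval using the registration's pinned page chunks,
 * coalescing physically contiguous pages into single iommu_map() calls.
 */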
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
                                                struct usnic_uiom_reg *uiomr)
{
        int i, err;
        size_t size;
        struct usnic_uiom_chunk *chunk;
        struct usnic_uiom_interval_node *interval_node;
        dma_addr_t pa;
        dma_addr_t pa_start = 0;
        dma_addr_t pa_end = 0;
        long int va_start = -EINVAL;
        struct usnic_uiom_pd *pd = uiomr->pd;
        long int va = uiomr->va & PAGE_MASK;
        int flags = IOMMU_READ | IOMMU_CACHE;

        flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
        chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
                                                                        list);
        list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
                for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
                        pa = sg_phys(&chunk->page_list[i]);
                        if ((va >> PAGE_SHIFT) < interval_node->start)
                                continue;

                        if ((va >> PAGE_SHIFT) == interval_node->start) {
                                /* First page of the interval */
                                va_start = va;
                                pa_start = pa;
                                pa_end = pa;
                        }

                        WARN_ON(va_start == -EINVAL);

                        if ((pa_end + PAGE_SIZE != pa) &&
                                        (pa != pa_start)) {
                                /* PAs are not contiguous */
                                size = pa_end - pa_start + PAGE_SIZE;
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
                                                        size, flags);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
                                        goto err_out;
                                }
                                va_start = va;
                                pa_start = pa;
                                pa_end = pa;
                        }

                        if ((va >> PAGE_SHIFT) == interval_node->last) {
                                /* Last page of the interval */
                                size = pa - pa_start + PAGE_SIZE;
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
                                                size, flags);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
                                        goto err_out;
                                }
                                break;
                        }

                        if (pa != pa_start)
                                pa_end += PAGE_SIZE;
                }

                if (i == chunk->nents) {
                        /*
                         * Hit last entry of the chunk,
                         * hence advance to next chunk
                         */
                        chunk = list_first_entry(&chunk->list,
                                                        struct usnic_uiom_chunk,
                                                        list);
                        goto iter_chunk;
                }
        }

        return 0;

err_out:
        usnic_uiom_unmap_sorted_intervals(intervals, pd);
        return err;
}

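/*
 * Register a user memory region with a protection domain: pin the pages,
 * map the portions not already covered by the interval tree, and insert
 * the new interval.  Returns the registration or an ERR_PTR() on failure.
 */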
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
                                                unsigned long addr, size_t size,
                                                int writable, int dmasync)
{
        struct usnic_uiom_reg *uiomr;
        unsigned long va_base, vpn_start, vpn_last;
        unsigned long npages;
        int offset, err;
        LIST_HEAD(sorted_diff_intervals);

        /*
         * Intel IOMMU map throws an error if a translation entry is
         * changed from read to write.  This module may not unmap
         * and then remap the entry after fixing the permission
         * because this opens up a small window where hw DMA may page fault.
         * Hence, make all entries writable.
         */
        writable = 1;

        va_base = addr & PAGE_MASK;
        offset = addr & ~PAGE_MASK;
        npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
        vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
        vpn_last = vpn_start + npages - 1;

        uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
        if (!uiomr)
                return ERR_PTR(-ENOMEM);

        uiomr->va = va_base;
        uiomr->offset = offset;
        uiomr->length = size;
        uiomr->writable = writable;
        uiomr->pd = pd;

        err = usnic_uiom_get_pages(addr, size, writable, dmasync,
                                   uiomr);
        if (err) {
                usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
                                vpn_start, vpn_last, err);
                goto out_free_uiomr;
        }

        spin_lock(&pd->lock);
        err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
                                                (writable) ? IOMMU_WRITE : 0,
                                                IOMMU_WRITE,
                                                &pd->root,
                                                &sorted_diff_intervals);
        if (err) {
                usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
                                                vpn_start, vpn_last, err);
                goto out_put_pages;
        }

        err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
        if (err) {
                usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
                                                vpn_start, vpn_last, err);
                goto out_put_intervals;
        }

        err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
                                        (writable) ? IOMMU_WRITE : 0);
        if (err) {
                usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
                                                vpn_start, vpn_last, err);
                goto out_unmap_intervals;
        }

        usnic_uiom_put_interval_set(&sorted_diff_intervals);
        spin_unlock(&pd->lock);

        return uiomr;

out_unmap_intervals:
        usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
        usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
        usnic_uiom_put_pages(&uiomr->chunk_list, 0);
        spin_unlock(&pd->lock);
        mmdrop(uiomr->owning_mm);
out_free_uiomr:
        kfree(uiomr);
        return ERR_PTR(err);
}

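/* Drop the mm reference taken at registration time and free the registration. */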
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
        mmdrop(uiomr->owning_mm);
        kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
        return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

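/* Unmap, unpin, and uncharge a registration's pages, then free it. */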
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
        __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

        atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
        __usnic_uiom_release_tail(uiomr);
}

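/*
 * Allocate a protection domain backed by an IOMMU domain on the PCI bus
 * and install the fault handler.
 */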
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
        struct usnic_uiom_pd *pd;
        void *domain;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
        if (!domain) {
                usnic_err("Failed to allocate IOMMU domain");
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

        spin_lock_init(&pd->lock);
        INIT_LIST_HEAD(&pd->devs);

        return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
        iommu_domain_free(pd->domain);
        kfree(pd);
}

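/*
 * Attach a device to the PD's IOMMU domain.  The IOMMU must support cache
 * coherency; otherwise the device is detached again and -EINVAL is returned.
 */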
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        int err;

        uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
        if (!uiom_dev)
                return -ENOMEM;
        uiom_dev->dev = dev;

        err = iommu_attach_device(pd->domain, dev);
        if (err)
                goto out_free_dev;

        if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
                usnic_err("IOMMU of %s does not support cache coherency\n",
                                dev_name(dev));
                err = -EINVAL;
                goto out_detach_device;
        }

        spin_lock(&pd->lock);
        list_add_tail(&uiom_dev->link, &pd->devs);
        pd->dev_cnt++;
        spin_unlock(&pd->lock);

        return 0;

out_detach_device:
        iommu_detach_device(pd->domain, dev);
out_free_dev:
        kfree(uiom_dev);
        return err;
}

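/* Remove a device from the PD's device list and detach it from the IOMMU domain. */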
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        int found = 0;

        spin_lock(&pd->lock);
        list_for_each_entry(uiom_dev, &pd->devs, link) {
                if (uiom_dev->dev == dev) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                usnic_err("Unable to free dev %s - not found\n",
                                dev_name(dev));
                spin_unlock(&pd->lock);
                return;
        }

        list_del(&uiom_dev->link);
        pd->dev_cnt--;
        spin_unlock(&pd->lock);

        iommu_detach_device(pd->domain, dev);
}

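/*
 * Return a NULL-terminated array of the devices currently attached to the
 * PD.  The caller must release it with usnic_uiom_free_dev_list().
 */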
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
        struct usnic_uiom_dev *uiom_dev;
        struct device **devs;
        int i = 0;

        spin_lock(&pd->lock);
        devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
        if (!devs) {
                devs = ERR_PTR(-ENOMEM);
                goto out;
        }

        list_for_each_entry(uiom_dev, &pd->devs, link) {
                devs[i++] = uiom_dev->dev;
        }
out:
        spin_unlock(&pd->lock);
        return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
        kfree(devs);
}

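/* Module init helper: fail unless an IOMMU is present on the PCI bus. */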
int usnic_uiom_init(char *drv_name)
{
        if (!iommu_present(&pci_bus_type)) {
                usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
                return -EPERM;
        }

        return 0;
}