drivers/infiniband/hw/usnic/usnic_uiom.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

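/*
 * Deferred ->locked_vm accounting: when usnic_uiom_reg_release() cannot
 * take mmap_sem without blocking, the adjustment is queued on
 * usnic_uiom_wq and performed here instead.
 */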
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

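/*
 * IOMMU fault handler for the usNIC domain: log the faulting device,
 * domain, IOVA and flags.  Returning -ENOSYS tells the IOMMU core the
 * fault was not handled here.
 */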
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

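/*
 * Release every page referenced by the scatterlist chunks on chunk_list,
 * marking each page dirty first when requested, and free the chunks.
 */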
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

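/*
 * Pin the user pages backing [addr, addr + size), charge them against
 * RLIMIT_MEMLOCK under mmap_sem, and collect them into scatterlist
 * chunks on chunk_list.  On failure everything pinned so far is
 * released again.
 */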
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

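/*
 * Unmap the IOVA ranges described by a sorted list of interval nodes
 * from the protection domain, one page at a time (see the workaround
 * note below).
 */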
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

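/*
 * Tear down a registration: remove its range from the PD's interval
 * tree, unmap the corresponding IOVAs, and unpin the pages.  Pages are
 * only marked dirty if the caller asks for it and the removed mapping
 * was writable.
 */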
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

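/*
 * Walk the pinned chunks alongside the sorted interval list and call
 * iommu_map() once per run of physically contiguous pages, so each
 * interval is mapped with as few IOMMU calls as possible.  On failure,
 * usnic_uiom_unmap_sorted_intervals() undoes any partial mappings.
 */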
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

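/*
 * Register a region of user memory with a protection domain: pin the
 * pages, compute the intervals of virtual page numbers not already
 * present in the PD's interval tree, map those intervals through the
 * IOMMU, and record the new interval in the tree.  Returns the
 * registration handle or an ERR_PTR().
 */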
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap and
	 * then remap the entry after fixing the permission because
	 * that would open a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

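/*
 * Release a registration created by usnic_uiom_reg_get() and give back
 * the locked_vm pages.  If this is the final teardown ("closing") and
 * mmap_sem cannot be taken without blocking, the accounting is deferred
 * to usnic_uiom_wq (see the comment below).
 */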
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the locked_vm accounting to the usnic_uiom_wq
	 * workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

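/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the
 * PCI bus, install the fault handler, and initialise the device list
 * and lock.  usnic_uiom_dealloc_pd() undoes this.
 */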
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

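/*
 * Attach a device to the PD's IOMMU domain and add it to the PD's
 * device list.  The attach is rejected if the device's IOMMU cannot
 * provide cache coherency, since mappings in this domain are created
 * with IOMMU_CACHE.
 */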
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

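/*
 * Remove a device from the PD's device list and detach it from the
 * IOMMU domain.  Logs an error and returns if the device was never
 * attached.
 */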
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}

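/*
 * Return a NULL-terminated snapshot of the devices currently attached
 * to the PD.  The caller frees it with usnic_uiom_free_dev_list().
 */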
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

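/*
 * Module-level setup and teardown: require an IOMMU on the PCI bus and
 * create the workqueue used for deferred locked_vm accounting.
 */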
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}