/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we have not
 * yet had to handle.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                unsigned long dsisr, unsigned *flt)
{
        struct vm_area_struct *vma;
        unsigned long is_write;
        int ret;

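        /*
         * Bail out if the address space is gone: a NULL mm or pgd
         * means there is nothing left to fault pages into.
         */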
        if (mm == NULL)
                return -EFAULT;

        if (mm->pgd == NULL)
                return -EFAULT;

        down_read(&mm->mmap_sem);
        ret = -EFAULT;
        vma = find_vma(mm, ea);
        if (!vma)
                goto out_unlock;

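        /*
         * find_vma() returns the first VMA that ends above ea. If ea
         * falls below its start, the access is only valid when the VMA
         * can grow downwards (a stack), in which case we expand it.
         */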
        if (ea < vma->vm_start) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out_unlock;
                if (expand_stack(vma, ea))
                        goto out_unlock;
        }

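        /*
         * Check the access against the VMA permissions: a store needs
         * VM_WRITE; a load needs VM_READ or VM_EXEC, and a protection
         * fault on a load cannot be fixed up here.
         */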
        is_write = dsisr & DSISR_ISSTORE;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out_unlock;
        } else {
                if (dsisr & DSISR_PROTFAULT)
                        goto out_unlock;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto out_unlock;
        }

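        /*
         * Fault the page in. handle_mm_fault() returns VM_FAULT_* bits
         * in *flt; OOM and SIGBUS are translated into errnos, and any
         * other VM_FAULT_ERROR bit is unexpected here.
         */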
        ret = 0;
        *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(*flt & VM_FAULT_ERROR)) {
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
                        goto out_unlock;
                } else if (*flt & VM_FAULT_SIGBUS) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
                BUG();
        }

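        /* Account the fault to the current task, as do_page_fault() does. */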
        if (*flt & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;

out_unlock:
        up_read(&mm->mmap_sem);
        return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

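/*
 * Build the SLB entry (ESID/VSID pair) a coprocessor needs in order to
 * translate the given effective address. Returns non-zero for an
 * address outside the user, vmalloc and kernel linear regions.
 */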
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
        u64 vsid;
        int psize, ssize;

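        /* Effective segment ID, with the valid bit set. */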
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                break;
        case KERNEL_REGION_ID:
                pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
                psize = mmu_linear_psize;
                ssize = mmu_kernel_ssize;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                break;
        default:
                pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
                return 1;
        }

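        /*
         * Fold in the protection (user access), page-size encoding
         * (SLLP) and, for 1T segments, the segment-size bit.
         */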
        vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;

        vsid |= mmu_psize_defs[psize].sllp |
                ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

        slb->vsid = vsid;

        return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

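/*
 * Invalidate the SLB entries cached by every coprocessor attached to
 * this address space: the SPUs (when built in) and any cxl contexts.
 */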
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
        cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);