drivers/misc/mic/scif/scif_rma_list.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>

/*
 * scif_insert_tcw:
 *
 * Insert a temporary window into the temp registration list, which is
 * kept sorted by va_for_temp. RMA lock must be held.
 */
void scif_insert_tcw(struct scif_window *window, struct list_head *head)
{
        struct scif_window *curr = NULL;
        struct scif_window *prev = list_entry(head, struct scif_window, list);
        struct list_head *item;

        INIT_LIST_HEAD(&window->list);
        /* If the new entry belongs at the tail, add it there and skip the scan */
        if (!list_empty(head)) {
                curr = list_entry(head->prev, struct scif_window, list);
                if (curr->va_for_temp < window->va_for_temp) {
                        list_add_tail(&window->list, head);
                        return;
                }
        }
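        /* Otherwise walk the sorted list for the first window past the new VA */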
        list_for_each(item, head) {
                curr = list_entry(item, struct scif_window, list);
                if (curr->va_for_temp > window->va_for_temp)
                        break;
                prev = curr;
        }
        list_add(&window->list, &prev->list);
}

/*
 * scif_insert_window:
 *
 * Insert a window into the self registration list, which is kept
 * sorted by offset. RMA lock must be held.
 */
void scif_insert_window(struct scif_window *window, struct list_head *head)
{
        struct scif_window *curr = NULL, *prev = NULL;
        struct list_head *item;

        INIT_LIST_HEAD(&window->list);
        list_for_each(item, head) {
                curr = list_entry(item, struct scif_window, list);
                if (curr->offset > window->offset)
                        break;
                prev = curr;
        }
        if (!prev)
                list_add(&window->list, head);
        else
                list_add(&window->list, &prev->list);
        scif_set_window_ref(window, window->nr_pages);
}
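/*
 * Example (hypothetical values): inserting windows with offsets 0x2000,
 * 0x0000 and then 0x1000 yields the list 0x0000 -> 0x1000 -> 0x2000.
 */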

/*
 * scif_query_tcw:
 *
 * Query the temp cached registration list of ep for an overlapping window.
 * On a permission mismatch, destroy the overlapping window. If the
 * permissions match and the overlap is partial, destroy the window but
 * return the new (expanded) range in req.
 * RMA lock must be held.
 */
int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
{
        struct list_head *item, *temp, *head = req->head;
        struct scif_window *window;
        u64 start_va_window, start_va_req = req->va_for_temp;
        u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;

        if (!req->nr_bytes)
                return -EINVAL;
        /*
         * Avoid traversing the entire list to find out that there
         * is no entry that matches.
         */
        if (!list_empty(head)) {
                window = list_last_entry(head, struct scif_window, list);
                end_va_window = window->va_for_temp +
                        (window->nr_pages << PAGE_SHIFT);
                if (start_va_req > end_va_window)
                        return -ENXIO;
        }
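        /* Walk the windows in ascending VA order, looking for an overlap */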
        list_for_each_safe(item, temp, head) {
                window = list_entry(item, struct scif_window, list);
                start_va_window = window->va_for_temp;
                end_va_window = window->va_for_temp +
                        (window->nr_pages << PAGE_SHIFT);
                /* Request ends before this window; later ones start higher */
                if (start_va_req < start_va_window &&
                    end_va_req < start_va_window)
                        break;
                /* This window ends before the request begins; keep scanning */
                if (start_va_req >= end_va_window)
                        continue;
                if ((window->prot & req->prot) == req->prot) {
                        /* Full containment: reuse the cached window as is */
                        if (start_va_req >= start_va_window &&
                            end_va_req <= end_va_window) {
                                *req->out_window = window;
                                return 0;
                        }
                        /*
                         * Partial overlap: expand the request to the union
                         * of the request and the existing window so that
                         * the caller re-registers the whole range. Guard
                         * each u64 subtraction so it cannot underflow.
                         */
                        if (start_va_window < start_va_req) {
                                req->nr_bytes +=
                                        start_va_req - start_va_window;
                                req->va_for_temp = start_va_window;
                        }
                        if (end_va_window > end_va_req)
                                req->nr_bytes +=
                                        end_va_window - end_va_req;
                }
                /* Destroy the old window to create a new one */
                __scif_rma_destroy_tcw_helper(window);
                break;
        }
        return -ENXIO;
}

/*
 * scif_query_window:
 *
 * Query the registration list and check whether a valid contiguous
 * range of windows exists.
 * RMA lock must be held.
 */
int scif_query_window(struct scif_rma_req *req)
{
        struct list_head *item;
        struct scif_window *window;
        s64 end_offset, offset = req->offset;
        u64 tmp_min, nr_bytes_left = req->nr_bytes;

        if (!req->nr_bytes)
                return -EINVAL;

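        /* Consume the requested range window by window, in offset order */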
        list_for_each(item, req->head) {
                window = list_entry(item, struct scif_window, list);
                end_offset = window->offset +
                        (window->nr_pages << PAGE_SHIFT);
                if (offset < window->offset)
                        /* Offset not found! */
                        return -ENXIO;
                if (offset >= end_offset)
                        continue;
                /* Check read/write protections. */
                if ((window->prot & req->prot) != req->prot)
                        return -EPERM;
                if (nr_bytes_left == req->nr_bytes)
                        /* Store the first window */
                        *req->out_window = window;
                tmp_min = min((u64)end_offset - offset, nr_bytes_left);
                nr_bytes_left -= tmp_min;
                offset += tmp_min;
                /*
                 * Range requested encompasses
                 * multiple windows contiguously.
                 */
                if (!nr_bytes_left) {
                        /* Done for partial window */
                        if (req->type == SCIF_WINDOW_PARTIAL ||
                            req->type == SCIF_WINDOW_SINGLE)
                                return 0;
                        /* Extra logic for full windows */
                        if (offset == end_offset)
                                /* Spanning multiple whole windows */
                                return 0;
                        /* Not spanning multiple whole windows */
                        return -ENXIO;
                }
                if (req->type == SCIF_WINDOW_SINGLE)
                        break;
        }
        dev_err(scif_info.mdev.this_device,
                "%s %d ENXIO\n", __func__, __LINE__);
        return -ENXIO;
}

/*
 * scif_rma_list_unregister:
 *
 * Traverse the self registration list starting from window and call
 * scif_unregister_window() on each window until nr_pages have been
 * unregistered. RMA lock must be held.
 */
int scif_rma_list_unregister(struct scif_window *window,
                             s64 offset, int nr_pages)
{
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;
        struct list_head *head = &ep->rma_info.reg_list;
        s64 end_offset;
        int err = 0;
        int loop_nr_pages;
        struct scif_window *_window;

        list_for_each_entry_safe_from(window, _window, head, list) {
                end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
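                /* Pages of the request that fall within this window */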
                loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
                                    nr_pages);
                err = scif_unregister_window(window);
                if (err)
                        return err;
                nr_pages -= loop_nr_pages;
                offset += (loop_nr_pages << PAGE_SHIFT);
                if (!nr_pages)
                        break;
        }
        return 0;
}

/*
 * scif_unmap_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Delete any DMA mappings created
 */
void scif_unmap_all_windows(scif_epd_t epd)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct list_head *head = &ep->rma_info.reg_list;

        mutex_lock(&ep->rma_info.rma_lock);
        list_for_each_safe(item, tmp, head) {
                window = list_entry(item, struct scif_window, list);
                scif_unmap_window(ep->remote_dev, window);
        }
        mutex_unlock(&ep->rma_info.rma_lock);
}

/*
 * scif_unregister_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Call scif_unregister_window(..)
 * The RMA lock is taken internally, so it must not be held on entry.
 */
int scif_unregister_all_windows(scif_epd_t epd)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct list_head *head = &ep->rma_info.reg_list;
        int err = 0;

        mutex_lock(&ep->rma_info.rma_lock);
retry:
        item = NULL;
        tmp = NULL;
        list_for_each_safe(item, tmp, head) {
                window = list_entry(item, struct scif_window, list);
                ep->rma_info.async_list_del = 0;
                err = scif_unregister_window(window);
                if (err)
                        dev_err(scif_info.mdev.this_device,
                                "%s %d err %d\n",
                                __func__, __LINE__, err);
                /*
                 * Need to restart list traversal if there has been
                 * an asynchronous list entry deletion.
                 */
                if (READ_ONCE(ep->rma_info.async_list_del))
                        goto retry;
        }
        mutex_unlock(&ep->rma_info.rma_lock);
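        /* Defer MMU notifier cleanup for this endpoint to the workqueue */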
        if (!list_empty(&ep->rma_info.mmn_list)) {
                spin_lock(&scif_info.rmalock);
                list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
                spin_unlock(&scif_info.rmalock);
                schedule_work(&scif_info.mmu_notif_work);
        }
        return err;
}