// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>

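/* Free the CB's DMA-coherent buffer and the CB structure itself. */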
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
			(void *) (uintptr_t) cb->kernel_address,
			cb->bus_address);
	kfree(cb);
}

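/* Return a pool CB to the device's CB pool; otherwise free it completely. */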
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

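/* kref release callback - called when the last reference to a CB is put */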
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	cb_do_release(hdev, cb);
}

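/*
 * Allocate a CB structure and its DMA-coherent buffer. Kernel-context
 * allocations (ctx_id == HL_KERNEL_ASID_ID) must not sleep, see the
 * comment below.
 */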
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id)
{
	struct hl_cb *cb;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (ctx_id == HL_KERNEL_ASID_ID)
		p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	else
		p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}

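/**
 * hl_cb_create() - create a new command buffer and return a handle for it.
 * @hdev: pointer to the habanalabs device structure.
 * @mgr: the CB manager that will track the new CB.
 * @cb_size: requested size; rounded up to at least PAGE_SIZE.
 * @handle: result - an opaque handle that encodes the CB's IDR id.
 * @ctx_id: ASID of the owning context (HL_KERNEL_ASID_ID for driver CBs).
 *
 * Kernel CBs that fit the pool CB size are taken from the device's CB pool
 * when possible; otherwise a new CB is allocated. The returned handle is the
 * IDR id OR'ed with HL_MMAP_CB_MASK and shifted left by PAGE_SHIFT, so it
 * can be used directly as an mmap offset.
 *
 * Return: 0 on success, negative error code on failure.
 */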
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 cb_size, u64 *handle, int ctx_id)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > HL_MAX_CB_SIZE) {
		dev_err(hdev->dev,
			"CB size %d must not exceed %d\n",
			cb_size, HL_MAX_CB_SIZE);
		rc = -EINVAL;
		goto out_err;
	}

	/* Minimum allocation must be PAGE_SIZE */
	if (cb_size < PAGE_SIZE)
		cb_size = PAGE_SIZE;

	if (ctx_id == HL_KERNEL_ASID_ID &&
			cb_size <= hdev->asic_prop.cb_pool_cb_size) {

		spin_lock(&hdev->cb_pool_lock);
		if (!list_empty(&hdev->cb_pool)) {
			cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
					pool_list);
			list_del(&cb->pool_list);
			spin_unlock(&hdev->cb_pool_lock);
			alloc_new_cb = false;
		} else {
			spin_unlock(&hdev->cb_pool_lock);
			dev_dbg(hdev->dev, "CB pool is empty\n");
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx_id = ctx_id;

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto release_cb;
	}

	cb->id = rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR handle is 32-bit, so we can safely OR it with a mask that
	 * is above 32 bits
	 */
	*handle = cb->id | HL_MMAP_CB_MASK;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

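/**
 * hl_cb_destroy() - destroy the CB that matches the given handle.
 * @hdev: pointer to the habanalabs device structure.
 * @mgr: the CB manager that tracks the CB.
 * @cb_handle: the handle returned by hl_cb_create().
 *
 * Remove the CB from the manager's IDR and drop the reference taken at
 * creation time. The CB memory is released only when the last reference
 * is gone (e.g. after an outstanding mmap is unmapped).
 *
 * Return: 0 on success, -EINVAL if no CB matches the handle.
 */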
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was shifted when it was given to the user for mmap, so
	 * shift it back to the value the IDR originally allocated
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

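/**
 * hl_cb_ioctl() - IOCTL handler for command buffer operations.
 * @hpriv: per-file private data of the calling process.
 * @data: ioctl arguments (union hl_cb_args).
 *
 * Dispatch CB create/destroy requests coming from user-space.
 *
 * Return: 0 on success, negative error code otherwise.
 */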
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle;
	int rc;

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
					&handle, hpriv->ctx->asid);
		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;
	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

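/*
 * VMA close callback. Partial unmaps only shrink the tracked mmap size;
 * once the entire mapping is gone, clear the mmap flag and drop the
 * reference that was transferred to the VMA in hl_cb_mmap().
 */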
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

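/**
 * hl_cb_mmap() - map a command buffer into the calling process.
 * @hpriv: per-file private data of the calling process.
 * @vma: the VMA describing the requested mapping; vm_pgoff carries the CB
 *       handle used for the IDR lookup.
 *
 * Each CB may be mapped to user-space only once at a time. The reference
 * taken by hl_cb_get() is transferred to the VMA and released in
 * cb_vm_close() when the mapping is torn down.
 *
 * Return: 0 on success, negative error code otherwise.
 */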
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	phys_addr_t address;
	u32 handle;
	int rc;

	handle = vma->vm_pgoff;

	/* A reference was taken here, released in cb_vm_close() or below */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle %d\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB is already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	/* Calculate the physical address of the CB */
	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					address, cb->size);

	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

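/**
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 * @hdev: pointer to the habanalabs device structure.
 * @mgr: the CB manager that tracks the CB.
 * @handle: the CB's IDR handle.
 *
 * The caller must release the reference with hl_cb_put().
 *
 * Return: pointer to the CB, or NULL if no CB matches the handle.
 */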
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle %d\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

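/*
 * Release a reference taken by hl_cb_get(). The CB is freed (or returned
 * to the pool) when the last reference is dropped.
 */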
void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

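/* Initialize a CB manager: its lock and handle IDR. */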
void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

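/*
 * Tear down a CB manager: drop the creation reference of every CB that is
 * still registered, warn about CBs that remain alive (i.e. still hold
 * other references), then destroy the IDR.
 */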
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx_id);
	}

	idr_destroy(&mgr->cb_handles);
}

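/**
 * hl_cb_kernel_create() - create a CB owned by the kernel driver.
 * @hdev: pointer to the habanalabs device structure.
 * @cb_size: requested CB size.
 *
 * Convenience wrapper that creates a CB in the kernel CB manager and
 * returns a referenced pointer to it instead of a handle.
 *
 * Return: pointer to the CB on success, NULL on failure.
 */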
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
			HL_KERNEL_ASID_ID);
	if (rc) {
		dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

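/*
 * Pre-allocate the device's pool of kernel CBs, so that hl_cb_create() can
 * hand out kernel CBs on latency-sensitive paths without a fresh DMA
 * allocation.
 */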
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

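/* Free all CBs that are still sitting in the device's CB pool. */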
int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}