// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/*
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */

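/*
 * incr_index - advance a ring index by @count, wrapping at @max.
 * Worked example: with a ring of max == 8 slots, incr_index(6, 3, 8)
 * returns 1, the position reached after wrapping past the ring end.
 */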
static inline int incr_index(int index, int count, int max)
{
        if ((index + count) >= max)
                index = index + count - max;
        else
                index += count;

        return index;
}

static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
        struct nitrox_device *ndev = sr->ndev;
        struct device *dev = DEV(ndev);

        dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
                         DMA_TO_DEVICE);
        kfree(sr->in.sgcomp);
        sr->in.sg = NULL;
        sr->in.sgmap_cnt = 0;

        dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
                     DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
                         DMA_TO_DEVICE);
        kfree(sr->out.sgcomp);
        sr->out.sg = NULL;
        sr->out.sgmap_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
        softreq_unmap_sgbufs(sr);
        kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of DMA mapped entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   --------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 * Returns 0 on success, or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
                               struct nitrox_sgtable *sgtbl, int map_nents)
{
        struct nitrox_device *ndev = sr->ndev;
        struct nitrox_sgcomp *sgcomp;
        struct scatterlist *sg;
        dma_addr_t dma;
        size_t sz_comp;
        int i, j, nr_sgcomp;

        nr_sgcomp = roundup(map_nents, 4) / 4;
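        /*
         * e.g. map_nents == 6 gives nr_sgcomp == 2; the unused len[]/dma[]
         * slots of the last component stay zero from the kzalloc() below.
         */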

        /* each component holds 4 dma pointers */
        sz_comp = nr_sgcomp * sizeof(*sgcomp);
        sgcomp = kzalloc(sz_comp, sr->gfp);
        if (!sgcomp)
                return -ENOMEM;

        sgtbl->sgcomp = sgcomp;

        sg = sgtbl->sg;
        /* populate device sg component */
        for (i = 0; i < nr_sgcomp; i++) {
                for (j = 0; j < 4 && sg; j++) {
                        sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
                        sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
                        sg = sg_next(sg);
                }
        }
        /* map the device sg component */
        dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
        if (dma_mapping_error(DEV(ndev), dma)) {
                kfree(sgtbl->sgcomp);
                sgtbl->sgcomp = NULL;
                return -ENOMEM;
        }

        sgtbl->sgcomp_dma = dma;
        sgtbl->sgcomp_len = sz_comp;

        return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
                          struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct scatterlist *sg = req->src;
        int i, nents, ret = 0;

        nents = dma_map_sg(dev, req->src, sg_nents(req->src),
                           DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        for_each_sg(req->src, sg, nents, i)
                sr->in.total_bytes += sg_dma_len(sg);

        sr->in.sg = req->src;
        sr->in.sgmap_cnt = nents;
        ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
        if (ret)
                goto incomp_err;

        return 0;

incomp_err:
        dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
        sr->in.sgmap_cnt = 0;
        return ret;
}

static int dma_map_outbufs(struct nitrox_softreq *sr,
                           struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        int nents, ret = 0;

        nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
                           DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        sr->out.sg = req->dst;
        sr->out.sgmap_cnt = nents;
        ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
        if (ret)
                goto outcomp_map_err;

        return 0;

outcomp_map_err:
        dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
        sr->out.sgmap_cnt = 0;
        sr->out.sg = NULL;
        return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
                                    struct se_crypto_request *creq)
{
        int ret;

        ret = dma_map_inbufs(sr, creq);
        if (ret)
                return ret;

        ret = dma_map_outbufs(sr, creq);
        if (ret)
                softreq_unmap_sgbufs(sr);

        return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
                                    struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->backlog);

        spin_lock_bh(&cmdq->backlog_qlock);
        list_add_tail(&sr->backlog, &cmdq->backlog_head);
        atomic_inc(&cmdq->backlog_count);
        atomic_set(&sr->status, REQ_BACKLOG);
        spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->response);

        spin_lock_bh(&cmdq->resp_qlock);
        list_add_tail(&sr->response, &cmdq->response_head);
        spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        spin_lock_bh(&cmdq->resp_qlock);
        list_del(&sr->response);
        spin_unlock_bh(&cmdq->resp_qlock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
        return list_first_entry_or_null(&cmdq->response_head,
                                        struct nitrox_softreq, response);
}

static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
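        /*
         * Speculatively reserve a slot by bumping pending_count; if that
         * overshoots qlen the ring is full, so release the reservation
         * and report the queue as full.
         */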
        if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                atomic_dec(&cmdq->pending_count);
                /* sync with other cpus */
                smp_mb__after_atomic();
                return true;
        }
        /* sync with other cpus */
        smp_mb__after_atomic();
        return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue to post to
 *
 * The caller must ensure there is space in the ring, via cmdq_full(),
 * before calling; posting itself cannot fail.
 */
static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = sr->ndev;
        int idx;
        u8 *ent;

        spin_lock_bh(&cmdq->cmd_qlock);

        idx = cmdq->write_idx;
        /* copy the instruction */
        ent = cmdq->base + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);

        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);
        sr->tstamp = jiffies;
        /* flush the command queue updates */
        dma_wmb();

        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);
        /* orders the doorbell rings */
        mmiowb();

        cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

        spin_unlock_bh(&cmdq->cmd_qlock);

        /* increment the posted command count */
        atomic64_inc(&ndev->stats.posted);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;

        if (!atomic_read(&cmdq->backlog_count))
                return 0;

        spin_lock_bh(&cmdq->backlog_qlock);

        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                        ret = -ENOSPC;
                        break;
                }
                /* delete from backlog list */
                list_del(&sr->backlog);
                atomic_dec(&cmdq->backlog_count);
                /* sync with other cpus */
                smp_mb__after_atomic();

                /* post the command */
                post_se_instr(sr, cmdq);
        }
        spin_unlock_bh(&cmdq->backlog_qlock);

        return ret;
}

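/*
 * Queued (or backlogged) requests complete asynchronously, so this
 * returns -EINPROGRESS on success; -ENOSPC is returned only when the
 * ring is full and the request may not be backlogged.
 */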
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;

        /* try to post backlog requests */
        post_backlog_cmds(cmdq);

        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        /* increment drop count */
                        atomic64_inc(&ndev->stats.dropped);
                        return -ENOSPC;
                }
                /* add to backlog list */
                backlog_list_add(sr, cmdq);
                return -EINPROGRESS;
        }
        post_se_instr(sr, cmdq);

        return -EINPROGRESS;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Argument passed to @callback
 *
 * Returns -EINPROGRESS if the request was queued (or backlogged),
 * or a negative errno code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
                              struct se_crypto_request *req,
                              completion_t callback,
                              void *cb_arg)
{
        struct nitrox_softreq *sr;
        dma_addr_t ctx_handle = 0;
        int qno, ret = 0;

        if (!nitrox_ready(ndev))
                return -ENODEV;

        sr = kzalloc(sizeof(*sr), req->gfp);
        if (!sr)
                return -ENOMEM;

        sr->ndev = ndev;
        sr->flags = req->flags;
        sr->gfp = req->gfp;
        sr->callback = callback;
        sr->cb_arg = cb_arg;

        atomic_set(&sr->status, REQ_NOT_POSTED);

        sr->resp.orh = req->orh;
        sr->resp.completion = req->comp;

        ret = softreq_map_iobuf(sr, req);
        if (ret) {
                kfree(sr);
                return ret;
        }

        /* get the context handle; the DMA address lives in the
         * struct ctx_hdr stored just before the context buffer
         */
        if (req->ctx_handle) {
                struct ctx_hdr *hdr;
                u8 *ctx_ptr;

                ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
                hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
                ctx_handle = hdr->ctx_dma;
        }

        /* select the queue */
        qno = smp_processor_id() % ndev->nr_queues;

        sr->cmdq = &ndev->pkt_inq[qno];

        /*
         * 64-Byte Instruction Format
         *
         *  ----------------------
         *  |      DPTR0         | 8 bytes
         *  ----------------------
         *  |  PKT_IN_INSTR_HDR  | 8 bytes
         *  ----------------------
         *  |    PKT_IN_HDR      | 16 bytes
         *  ----------------------
         *  |    SLC_INFO        | 16 bytes
         *  ----------------------
         *  |   Front data       | 16 bytes
         *  ----------------------
         */
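        /* sizes above total the 64-byte instruction: 8 + 8 + 16 + 16 + 16 */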

        /* fill the packet instruction */
        /* word 0 */
        sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

        /* word 1 */
        sr->instr.ih.value = 0;
        sr->instr.ih.s.g = 1;
        sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
        sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
        sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
        sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
        sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

        /* word 2 */
        sr->instr.irh.value[0] = 0;
        sr->instr.irh.s.uddl = MIN_UDD_LEN;
        /* context length in 64-bit words */
        sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
        /* offset from solicit base port 256 */
        sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
        sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
        sr->instr.irh.s.arg = req->ctrl.s.arg;
        sr->instr.irh.s.opcode = req->opcode;
        sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

        /* word 3 */
        sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

        /* word 4 */
        sr->instr.slc.value[0] = 0;
        sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
        sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

        /* word 5 */
        sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

        /*
         * No endianness conversion for front data; it goes into the
         * payload as-is. Put the GP header in front data.
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;

        ret = nitrox_enqueue_request(sr);
        if (ret == -ENOSPC)
                goto send_fail;

        return ret;

send_fail:
        softreq_destroy(sr);
        return ret;
}

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
        return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
        struct nitrox_cmdq *cmdq;

        cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
        post_backlog_cmds(cmdq);
}

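/*
 * A request is treated as complete once its ORH carries a non-zero
 * error code, or once the completion byte has been overwritten from
 * the PENDING_SIG fill pattern; the latter is polled for up to ~1 ms.
 */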
static bool sr_completed(struct nitrox_softreq *sr)
{
        u64 orh = READ_ONCE(*sr->resp.orh);
        unsigned long timeout = jiffies + msecs_to_jiffies(1);

        if ((orh != PENDING_SIG) && (orh & 0xff))
                return true;

        while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
                if (time_after(jiffies, timeout)) {
                        pr_err("comp not done\n");
                        return false;
                }
        }

        return true;
}

/**
 * process_response_list - process completed requests
 * @cmdq: Command queue to operate on
 *
 * Walks the response list, completing (or timing out) up to
 * pending_count requests and invoking their callbacks.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr;
        int req_completed = 0, err = 0, budget;

        /* check all pending requests */
        budget = atomic_read(&cmdq->pending_count);

        while (req_completed < budget) {
                sr = get_first_response_entry(cmdq);
                if (!sr)
                        break;

                if (atomic_read(&sr->status) != REQ_POSTED)
                        break;

                /* check orh and completion bytes updates */
                if (!sr_completed(sr)) {
                        /* request not completed, check for timeout */
                        if (!cmd_timeout(sr->tstamp, ndev->timeout))
                                break;
                        dev_err_ratelimited(DEV(ndev),
                                            "Request timeout, orh 0x%016llx\n",
                                            READ_ONCE(*sr->resp.orh));
                }
                atomic_dec(&cmdq->pending_count);
                atomic64_inc(&ndev->stats.completed);
                /* sync with other cpus */
                smp_mb__after_atomic();
                /* remove from response list */
                response_list_del(sr, cmdq);

                /* ORH error code */
                err = READ_ONCE(*sr->resp.orh) & 0xff;

                if (sr->callback)
                        sr->callback(sr->cb_arg, err);
                softreq_destroy(sr);

                req_completed++;
        }
}

/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument, the struct nitrox_q_vector for this queue
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
        struct nitrox_cmdq *cmdq = qvec->cmdq;
        union nps_pkt_slc_cnts slc_cnts;

        /* read completion count */
        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        /* resend the interrupt if more work to do */
        slc_cnts.s.resend = 1;

        process_response_list(cmdq);

        /*
         * clear the interrupt with resend bit enabled,
         * MSI-X interrupt generates if Completion count > Threshold
         */
        writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
        /* order the writes */
        mmiowb();

        if (atomic_read(&cmdq->backlog_count))
                schedule_work(&cmdq->backlog_qflush);
}