1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2016-2017 Hisilicon Limited. */
3 #include <linux/crypto.h>
4 #include <linux/dma-mapping.h>
5 #include <linux/dmapool.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/slab.h>
9
10 #include <crypto/aes.h>
11 #include <crypto/algapi.h>
12 #include <crypto/des.h>
13 #include <crypto/skcipher.h>
14 #include <crypto/xts.h>
15 #include <crypto/internal/skcipher.h>
16
17 #include "sec_drv.h"
18
19 #define SEC_MAX_CIPHER_KEY              64
20 #define SEC_REQ_LIMIT SZ_32M
21
22 struct sec_c_alg_cfg {
23         unsigned c_alg          : 3;
24         unsigned c_mode         : 3;
25         unsigned key_len        : 2;
26         unsigned c_width        : 2;
27 };
28
29 static const struct sec_c_alg_cfg sec_c_alg_cfgs[] =  {
30         [SEC_C_DES_ECB_64] = {
31                 .c_alg = SEC_C_ALG_DES,
32                 .c_mode = SEC_C_MODE_ECB,
33                 .key_len = SEC_KEY_LEN_DES,
34         },
35         [SEC_C_DES_CBC_64] = {
36                 .c_alg = SEC_C_ALG_DES,
37                 .c_mode = SEC_C_MODE_CBC,
38                 .key_len = SEC_KEY_LEN_DES,
39         },
40         [SEC_C_3DES_ECB_192_3KEY] = {
41                 .c_alg = SEC_C_ALG_3DES,
42                 .c_mode = SEC_C_MODE_ECB,
43                 .key_len = SEC_KEY_LEN_3DES_3_KEY,
44         },
45         [SEC_C_3DES_ECB_192_2KEY] = {
46                 .c_alg = SEC_C_ALG_3DES,
47                 .c_mode = SEC_C_MODE_ECB,
48                 .key_len = SEC_KEY_LEN_3DES_2_KEY,
49         },
50         [SEC_C_3DES_CBC_192_3KEY] = {
51                 .c_alg = SEC_C_ALG_3DES,
52                 .c_mode = SEC_C_MODE_CBC,
53                 .key_len = SEC_KEY_LEN_3DES_3_KEY,
54         },
55         [SEC_C_3DES_CBC_192_2KEY] = {
56                 .c_alg = SEC_C_ALG_3DES,
57                 .c_mode = SEC_C_MODE_CBC,
58                 .key_len = SEC_KEY_LEN_3DES_2_KEY,
59         },
60         [SEC_C_AES_ECB_128] = {
61                 .c_alg = SEC_C_ALG_AES,
62                 .c_mode = SEC_C_MODE_ECB,
63                 .key_len = SEC_KEY_LEN_AES_128,
64         },
65         [SEC_C_AES_ECB_192] = {
66                 .c_alg = SEC_C_ALG_AES,
67                 .c_mode = SEC_C_MODE_ECB,
68                 .key_len = SEC_KEY_LEN_AES_192,
69         },
70         [SEC_C_AES_ECB_256] = {
71                 .c_alg = SEC_C_ALG_AES,
72                 .c_mode = SEC_C_MODE_ECB,
73                 .key_len = SEC_KEY_LEN_AES_256,
74         },
75         [SEC_C_AES_CBC_128] = {
76                 .c_alg = SEC_C_ALG_AES,
77                 .c_mode = SEC_C_MODE_CBC,
78                 .key_len = SEC_KEY_LEN_AES_128,
79         },
80         [SEC_C_AES_CBC_192] = {
81                 .c_alg = SEC_C_ALG_AES,
82                 .c_mode = SEC_C_MODE_CBC,
83                 .key_len = SEC_KEY_LEN_AES_192,
84         },
85         [SEC_C_AES_CBC_256] = {
86                 .c_alg = SEC_C_ALG_AES,
87                 .c_mode = SEC_C_MODE_CBC,
88                 .key_len = SEC_KEY_LEN_AES_256,
89         },
90         [SEC_C_AES_CTR_128] = {
91                 .c_alg = SEC_C_ALG_AES,
92                 .c_mode = SEC_C_MODE_CTR,
93                 .key_len = SEC_KEY_LEN_AES_128,
94         },
95         [SEC_C_AES_CTR_192] = {
96                 .c_alg = SEC_C_ALG_AES,
97                 .c_mode = SEC_C_MODE_CTR,
98                 .key_len = SEC_KEY_LEN_AES_192,
99         },
100         [SEC_C_AES_CTR_256] = {
101                 .c_alg = SEC_C_ALG_AES,
102                 .c_mode = SEC_C_MODE_CTR,
103                 .key_len = SEC_KEY_LEN_AES_256,
104         },
105         [SEC_C_AES_XTS_128] = {
106                 .c_alg = SEC_C_ALG_AES,
107                 .c_mode = SEC_C_MODE_XTS,
108                 .key_len = SEC_KEY_LEN_AES_128,
109         },
110         [SEC_C_AES_XTS_256] = {
111                 .c_alg = SEC_C_ALG_AES,
112                 .c_mode = SEC_C_MODE_XTS,
113                 .key_len = SEC_KEY_LEN_AES_256,
114         },
115         [SEC_C_NULL] = {
116         },
117 };
118
119 /*
120  * Mutex used to ensure safe operation of the reference count of
121  * algorithm providers.
122  */
123 static DEFINE_MUTEX(algs_lock);
124 static unsigned int active_devs;
125
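/*
 * Fill a hardware BD template with the per-algorithm configuration and
 * the DMA address of the key buffer.  The template is later copied into
 * every element sent to the hardware for this transform.
 */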
126 static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
127                                            struct sec_bd_info *req,
128                                            enum sec_cipher_alg alg)
129 {
130         const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
131
132         memset(req, 0, sizeof(*req));
133         req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
134         req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
135         req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
136         req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
137
138         req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
139         req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
140 }
141
142 static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
143                                           const u8 *key,
144                                           unsigned int keylen,
145                                           enum sec_cipher_alg alg)
146 {
147         struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
148         struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
149
150         ctx->cipher_alg = alg;
151         memcpy(ctx->key, key, keylen);
152         sec_alg_skcipher_init_template(ctx, &ctx->req_template,
153                                        ctx->cipher_alg);
154 }
155
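/*
 * Build a chain of hardware SGLs for an already DMA mapped scatterlist.
 * A new hardware SGL is allocated from the dma pool for every
 * SEC_MAX_SGE_NUM entries and linked to the previous one.
 */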
156 static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
157                                      dma_addr_t *psec_sgl,
158                                      struct scatterlist *sgl,
159                                      int count,
160                                      struct sec_dev_info *info)
161 {
162         struct sec_hw_sgl *sgl_current = NULL;
163         struct sec_hw_sgl *sgl_next;
164         dma_addr_t sgl_next_dma;
165         struct scatterlist *sg;
166         int ret, sge_index, i;
167
168         if (!count)
169                 return -EINVAL;
170
171         for_each_sg(sgl, sg, count, i) {
172                 sge_index = i % SEC_MAX_SGE_NUM;
173                 if (sge_index == 0) {
174                         sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
175                                                    GFP_KERNEL, &sgl_next_dma);
176                         if (!sgl_next) {
177                                 ret = -ENOMEM;
178                                 goto err_free_hw_sgls;
179                         }
180
181                         if (!sgl_current) { /* First one */
182                                 *psec_sgl = sgl_next_dma;
183                                 *sec_sgl = sgl_next;
184                         } else { /* Chained */
185                                 sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
186                                 sgl_current->next_sgl = sgl_next_dma;
187                                 sgl_current->next = sgl_next;
188                         }
189                         sgl_current = sgl_next;
190                 }
191                 sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
192                 sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
193                 sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
194         }
195         sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
196         sgl_current->next_sgl = 0;
197         (*sec_sgl)->entry_sum_in_chain = count;
198
199         return 0;
200
201 err_free_hw_sgls:
202         sgl_current = *sec_sgl;
203         while (sgl_current) {
204                 sgl_next = sgl_current->next;
205                 dma_pool_free(info->hw_sgl_pool, sgl_current,
206                               sgl_current->next_sgl);
207                 sgl_current = sgl_next;
208         }
209         *psec_sgl = 0;
210
211         return ret;
212 }
213
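/* Free a chain of hardware SGLs back to the dma pool */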
214 static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
215                             dma_addr_t psec_sgl, struct sec_dev_info *info)
216 {
217         struct sec_hw_sgl *sgl_current, *sgl_next;
218
219         if (!hw_sgl)
220                 return;
221         sgl_current = hw_sgl;
222         while (sgl_current->next) {
223                 sgl_next = sgl_current->next;
224                 dma_pool_free(info->hw_sgl_pool, sgl_current,
225                               sgl_current->next_sgl);
226                 sgl_current = sgl_next;
227         }
228         dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
229 }
230
231 static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
232                                    const u8 *key, unsigned int keylen,
233                                    enum sec_cipher_alg alg)
234 {
235         struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
236         struct device *dev = ctx->queue->dev_info->dev;
237
238         mutex_lock(&ctx->lock);
239         if (ctx->key) {
240                 /* rekeying */
241                 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
242         } else {
243                 /* new key */
244                 ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
245                                                &ctx->pkey, GFP_KERNEL);
246                 if (!ctx->key) {
247                         mutex_unlock(&ctx->lock);
248                         return -ENOMEM;
249                 }
250         }
251         mutex_unlock(&ctx->lock);
252         sec_alg_skcipher_init_context(tfm, key, keylen, alg);
253
254         return 0;
255 }
256
257 static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
258                                            const u8 *key, unsigned int keylen)
259 {
260         enum sec_cipher_alg alg;
261
262         switch (keylen) {
263         case AES_KEYSIZE_128:
264                 alg = SEC_C_AES_ECB_128;
265                 break;
266         case AES_KEYSIZE_192:
267                 alg = SEC_C_AES_ECB_192;
268                 break;
269         case AES_KEYSIZE_256:
270                 alg = SEC_C_AES_ECB_256;
271                 break;
272         default:
273                 return -EINVAL;
274         }
275
276         return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
277 }
278
279 static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
280                                            const u8 *key, unsigned int keylen)
281 {
282         enum sec_cipher_alg alg;
283
284         switch (keylen) {
285         case AES_KEYSIZE_128:
286                 alg = SEC_C_AES_CBC_128;
287                 break;
288         case AES_KEYSIZE_192:
289                 alg = SEC_C_AES_CBC_192;
290                 break;
291         case AES_KEYSIZE_256:
292                 alg = SEC_C_AES_CBC_256;
293                 break;
294         default:
295                 return -EINVAL;
296         }
297
298         return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
299 }
300
301 static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
302                                            const u8 *key, unsigned int keylen)
303 {
304         enum sec_cipher_alg alg;
305
306         switch (keylen) {
307         case AES_KEYSIZE_128:
308                 alg = SEC_C_AES_CTR_128;
309                 break;
310         case AES_KEYSIZE_192:
311                 alg = SEC_C_AES_CTR_192;
312                 break;
313         case AES_KEYSIZE_256:
314                 alg = SEC_C_AES_CTR_256;
315                 break;
316         default:
317                 return -EINVAL;
318         }
319
320         return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
321 }
322
323 static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
324                                            const u8 *key, unsigned int keylen)
325 {
326         enum sec_cipher_alg alg;
327         int ret;
328
329         ret = xts_verify_key(tfm, key, keylen);
330         if (ret)
331                 return ret;
332
333         switch (keylen) {
334         case AES_KEYSIZE_128 * 2:
335                 alg = SEC_C_AES_XTS_128;
336                 break;
337         case AES_KEYSIZE_256 * 2:
338                 alg = SEC_C_AES_XTS_256;
339                 break;
340         default:
341                 return -EINVAL;
342         }
343
344         return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
345 }
346
347 static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
348                                            const u8 *key, unsigned int keylen)
349 {
350         if (keylen != DES_KEY_SIZE)
351                 return -EINVAL;
352
353         return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
354 }
355
356 static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
357                                            const u8 *key, unsigned int keylen)
358 {
359         if (keylen != DES_KEY_SIZE)
360                 return -EINVAL;
361
362         return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
363 }
364
365 static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
366                                             const u8 *key, unsigned int keylen)
367 {
368         if (keylen != DES_KEY_SIZE * 3)
369                 return -EINVAL;
370
371         return sec_alg_skcipher_setkey(tfm, key, keylen,
372                                        SEC_C_3DES_ECB_192_3KEY);
373 }
374
375 static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
376                                             const u8 *key, unsigned int keylen)
377 {
378         if (keylen != DES3_EDE_KEY_SIZE)
379                 return -EINVAL;
380
381         return sec_alg_skcipher_setkey(tfm, key, keylen,
382                                        SEC_C_3DES_CBC_192_3KEY);
383 }
384
385 static void sec_alg_free_el(struct sec_request_el *el,
386                             struct sec_dev_info *info)
387 {
388         sec_free_hw_sgl(el->out, el->dma_out, info);
389         sec_free_hw_sgl(el->in, el->dma_in, info);
390         kfree(el->sgl_in);
391         kfree(el->sgl_out);
392         kfree(el);
393 }
394
395 /* queuelock must be held */
396 static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
397 {
398         struct sec_request_el *el, *temp;
399         int ret = 0;
400
401         mutex_lock(&sec_req->lock);
402         list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
403                 /*
404                  * Add to the hardware queue only in the following circumstances:
405                  * 1) Software and hardware queues are empty, so no chain dependencies.
406                  * 2) No dependencies as this is a new IV (the software queue is
407                  *    still checked to be empty, to maintain ordering).
408                  * 3) No dependencies because the mode does not chain.
409                  *
410                  * In all other cases insert onto the software queue first, which
411                  * is then emptied as requests complete.
412                  */
413                 if (!queue->havesoftqueue ||
414                     (kfifo_is_empty(&queue->softqueue) &&
415                      sec_queue_empty(queue))) {
416                         ret = sec_queue_send(queue, &el->req, sec_req);
417                         if (ret == -EAGAIN) {
418                                 /* Wait until we can send then try again */
419                                 /* DEAD if here - should not happen */
420                                 ret = -EBUSY;
421                                 goto err_unlock;
422                         }
423                 } else {
424                         kfifo_put(&queue->softqueue, el);
425                 }
426         }
427 err_unlock:
428         mutex_unlock(&sec_req->lock);
429
430         return ret;
431 }
432
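/*
 * Completion callback for one element of a skcipher request.  Records
 * any error reported by the hardware, updates the IV for the CBC and
 * CTR modes, feeds the next element from the software queue or the
 * backlog into the hardware queue if there is room, and completes the
 * skcipher request once its last element has finished.
 */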
433 static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
434                                       struct crypto_async_request *req_base)
435 {
436         struct skcipher_request *skreq = container_of(req_base,
437                                                       struct skcipher_request,
438                                                       base);
439         struct sec_request *sec_req = skcipher_request_ctx(skreq);
440         struct sec_request *backlog_req;
441         struct sec_request_el *sec_req_el, *nextrequest;
442         struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
443         struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
444         struct device *dev = ctx->queue->dev_info->dev;
445         int icv_or_skey_en, ret;
446         bool done;
447
448         sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
449                                       head);
450         icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
451                 SEC_BD_W0_ICV_OR_SKEY_EN_S;
452         if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
453                 dev_err(dev, "Got an invalid answer %lu %d\n",
454                         sec_resp->w1 & SEC_BD_W1_BD_INVALID,
455                         icv_or_skey_en);
456                 sec_req->err = -EINVAL;
457                 /*
458                  * We need to muddle on to avoid getting stuck with elements
459                  * on the queue. The error will be reported to the requester,
460                  * which should be able to handle it appropriately.
461                  */
462         }
463
464         mutex_lock(&ctx->queue->queuelock);
465         /* Put the IV in place for chained cases */
466         switch (ctx->cipher_alg) {
467         case SEC_C_AES_CBC_128:
468         case SEC_C_AES_CBC_192:
469         case SEC_C_AES_CBC_256:
470                 if (sec_req_el->req.w0 & SEC_BD_W0_DE)
471                         sg_pcopy_to_buffer(sec_req_el->sgl_out,
472                                            sg_nents(sec_req_el->sgl_out),
473                                            skreq->iv,
474                                            crypto_skcipher_ivsize(atfm),
475                                            sec_req_el->el_length -
476                                            crypto_skcipher_ivsize(atfm));
477                 else
478                         sg_pcopy_to_buffer(sec_req_el->sgl_in,
479                                            sg_nents(sec_req_el->sgl_in),
480                                            skreq->iv,
481                                            crypto_skcipher_ivsize(atfm),
482                                            sec_req_el->el_length -
483                                            crypto_skcipher_ivsize(atfm));
484                 /* No need to sync to the device as coherent DMA */
485                 break;
486         case SEC_C_AES_CTR_128:
487         case SEC_C_AES_CTR_192:
488         case SEC_C_AES_CTR_256:
489                 crypto_inc(skreq->iv, 16);
490                 break;
491         default:
492                 /* Do not update */
493                 break;
494         }
495
496         if (ctx->queue->havesoftqueue &&
497             !kfifo_is_empty(&ctx->queue->softqueue) &&
498             sec_queue_empty(ctx->queue)) {
499                 ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
500                 if (ret <= 0)
501                         dev_err(dev,
502                                 "Error getting next element from kfifo %d\n",
503                                 ret);
504                 else
505                         /* We know there is space so this cannot fail */
506                         sec_queue_send(ctx->queue, &nextrequest->req,
507                                        nextrequest->sec_req);
508         } else if (!list_empty(&ctx->backlog)) {
509                 /* Need to verify there is room first */
510                 backlog_req = list_first_entry(&ctx->backlog,
511                                                typeof(*backlog_req),
512                                                backlog_head);
513                 if (sec_queue_can_enqueue(ctx->queue,
514                     backlog_req->num_elements) ||
515                     (ctx->queue->havesoftqueue &&
516                      kfifo_avail(&ctx->queue->softqueue) >
517                      backlog_req->num_elements)) {
518                         sec_send_request(backlog_req, ctx->queue);
519                         backlog_req->req_base->complete(backlog_req->req_base,
520                                                         -EINPROGRESS);
521                         list_del(&backlog_req->backlog_head);
522                 }
523         }
524         mutex_unlock(&ctx->queue->queuelock);
525
526         mutex_lock(&sec_req->lock);
527         list_del(&sec_req_el->head);
528         mutex_unlock(&sec_req->lock);
529         sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
530
531         /*
532          * Request is done.
533          * The dance is needed as the lock is freed once the request completes.
534          */
535         mutex_lock(&sec_req->lock);
536         done = list_empty(&sec_req->elements);
537         mutex_unlock(&sec_req->lock);
538         if (done) {
539                 if (crypto_skcipher_ivsize(atfm)) {
540                         dma_unmap_single(dev, sec_req->dma_iv,
541                                          crypto_skcipher_ivsize(atfm),
542                                          DMA_TO_DEVICE);
543                 }
544                 dma_unmap_sg(dev, skreq->src, sec_req->len_in,
545                              DMA_BIDIRECTIONAL);
546                 if (skreq->src != skreq->dst)
547                         dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
548                                      DMA_BIDIRECTIONAL);
549                 skreq->base.complete(&skreq->base, sec_req->err);
550         }
551 }
552
553 void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
554 {
555         struct sec_request *sec_req = shadow;
556
557         sec_req->cb(resp, sec_req->req_base);
558 }
559
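/*
 * Work out how many SEC_REQ_LIMIT sized chunks are needed to cover the
 * request and allocate the array holding the size of each chunk.
 */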
560 static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
561                                               int *steps)
562 {
563         size_t *sizes;
564         int i;
565
566         /* Split into suitable sized blocks */
567         *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
568         sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
569         if (!sizes)
570                 return -ENOMEM;
571
572         for (i = 0; i < *steps - 1; i++)
573                 sizes[i] = SEC_REQ_LIMIT;
574         sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
575         *split_sizes = sizes;
576
577         return 0;
578 }
579
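/*
 * DMA map the scatterlist and split it into one scatterlist per chunk
 * using sg_split().  On error paths the result is undone with
 * sec_unmap_sg_on_err().
 */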
580 static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
581                                 int steps, struct scatterlist ***splits,
582                                 int **splits_nents,
583                                 int sgl_len_in,
584                                 struct device *dev)
585 {
586         int ret, count;
587
588         count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
589         if (!count)
590                 return -EINVAL;
591
592         *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
593         if (!*splits) {
594                 ret = -ENOMEM;
595                 goto err_unmap_sg;
596         }
597         *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
598         if (!*splits_nents) {
599                 ret = -ENOMEM;
600                 goto err_free_splits;
601         }
602
603         /* Split the mapped scatterlist into steps chunks of split_sizes bytes */
604         ret = sg_split(sgl, count, 0, steps, split_sizes,
605                        *splits, *splits_nents, GFP_KERNEL);
606         if (ret) {
607                 ret = -ENOMEM;
608                 goto err_free_splits_nents;
609         }
610
611         return 0;
612
613 err_free_splits_nents:
614         kfree(*splits_nents);
615 err_free_splits:
616         kfree(*splits);
617 err_unmap_sg:
618         dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
619
620         return ret;
621 }
622
623 /*
624  * Reverses the sec_map_and_split_sg call for messages not yet added to
625  * the queues.
626  */
627 static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
628                                 struct scatterlist **splits, int *splits_nents,
629                                 int sgl_len_in, struct device *dev)
630 {
631         int i;
632
633         for (i = 0; i < steps; i++)
634                 kfree(splits[i]);
635         kfree(splits_nents);
636         kfree(splits);
637
638         dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
639 }
640
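/*
 * Allocate and fill one request element (BD plus hardware SGLs) for a
 * single chunk of a request.  The BD is copied from the transform's
 * template, the cipher direction and granule size are set, and hardware
 * SGLs are built for the input and, if the destination differs, the
 * output scatterlists.
 */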
641 static struct sec_request_el
642 *sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
643                            int el_size, bool different_dest,
644                            struct scatterlist *sgl_in, int n_ents_in,
645                            struct scatterlist *sgl_out, int n_ents_out,
646                            struct sec_dev_info *info)
647 {
648         struct sec_request_el *el;
649         struct sec_bd_info *req;
650         int ret;
651
652         el = kzalloc(sizeof(*el), GFP_KERNEL);
653         if (!el)
654                 return ERR_PTR(-ENOMEM);
655         el->el_length = el_size;
656         req = &el->req;
657         memcpy(req, template, sizeof(*req));
658
659         req->w0 &= ~SEC_BD_W0_CIPHER_M;
660         if (encrypt)
661                 req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
662         else
663                 req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
664
665         req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
666         req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
667                 SEC_BD_W0_C_GRAN_SIZE_19_16_M;
668
669         req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
670         req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
671                 SEC_BD_W0_C_GRAN_SIZE_21_20_M;
672
673         /* Writing the whole u32, so no need to take care of masking */
674         req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
675                 ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
676                  SEC_BD_W2_C_GRAN_SIZE_15_0_M);
677
678         req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
679         req->w1 |= SEC_BD_W1_ADDR_TYPE;
680
681         el->sgl_in = sgl_in;
682
683         ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
684                                         n_ents_in, info);
685         if (ret)
686                 goto err_free_el;
687
688         req->data_addr_lo = lower_32_bits(el->dma_in);
689         req->data_addr_hi = upper_32_bits(el->dma_in);
690
691         if (different_dest) {
692                 el->sgl_out = sgl_out;
693                 ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
694                                                 el->sgl_out,
695                                                 n_ents_out, info);
696                 if (ret)
697                         goto err_free_hw_sgl_in;
698
699                 req->w0 |= SEC_BD_W0_DE;
700                 req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
701                 req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
702
703         } else {
704                 req->w0 &= ~SEC_BD_W0_DE;
705                 req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
706                 req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
707         }
708
709         return el;
710
711 err_free_hw_sgl_in:
712         sec_free_hw_sgl(el->in, el->dma_in, info);
713 err_free_el:
714         kfree(el);
715
716         return ERR_PTR(ret);
717 }
718
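/*
 * Common encrypt/decrypt path.  The request is split into chunks of at
 * most SEC_REQ_LIMIT bytes, one request element is built per chunk and
 * the whole set is queued atomically: either all elements are accepted
 * by the hardware or software queue, or the request is backlogged or
 * fails with -EBUSY.
 */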
719 static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
720                                    bool encrypt)
721 {
722         struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
723         struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
724         struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
725         struct sec_queue *queue = ctx->queue;
726         struct sec_request *sec_req = skcipher_request_ctx(skreq);
727         struct sec_dev_info *info = queue->dev_info;
728         int i, ret, steps;
729         size_t *split_sizes;
730         struct scatterlist **splits_in;
731         struct scatterlist **splits_out = NULL;
732         int *splits_in_nents;
733         int *splits_out_nents = NULL;
734         struct sec_request_el *el, *temp;
735         bool split = skreq->src != skreq->dst;
736
737         mutex_init(&sec_req->lock);
738         sec_req->req_base = &skreq->base;
739         sec_req->err = 0;
740         /* SGL mapping is done out here to allow us to break it up as necessary */
741         sec_req->len_in = sg_nents(skreq->src);
742
743         ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
744                                                  &steps);
745         if (ret)
746                 return ret;
747         sec_req->num_elements = steps;
748         ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
749                                    &splits_in_nents, sec_req->len_in,
750                                    info->dev);
751         if (ret)
752                 goto err_free_split_sizes;
753
754         if (split) {
755                 sec_req->len_out = sg_nents(skreq->dst);
756                 ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
757                                            &splits_out, &splits_out_nents,
758                                            sec_req->len_out, info->dev);
759                 if (ret)
760                         goto err_unmap_in_sg;
761         }
762         /* Shared info stored in sec_req - applies to all BDs */
763         sec_req->tfm_ctx = ctx;
764         sec_req->cb = sec_skcipher_alg_callback;
765         INIT_LIST_HEAD(&sec_req->elements);
766
767         /*
768          * Future optimization: in the chaining case we cannot use a dma
769          * pool bounce buffer for the IV, but where we know there is no
770          * chaining we could.
771          */
772         if (crypto_skcipher_ivsize(atfm)) {
773                 sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
774                                                  crypto_skcipher_ivsize(atfm),
775                                                  DMA_TO_DEVICE);
776                 if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
777                         ret = -ENOMEM;
778                         goto err_unmap_out_sg;
779                 }
780         }
781
782         /* Set them all up then queue - cleaner error handling. */
783         for (i = 0; i < steps; i++) {
784                 el = sec_alg_alloc_and_fill_el(&ctx->req_template,
785                                                encrypt ? 1 : 0,
786                                                split_sizes[i],
787                                                skreq->src != skreq->dst,
788                                                splits_in[i], splits_in_nents[i],
789                                                split ? splits_out[i] : NULL,
790                                                split ? splits_out_nents[i] : 0,
791                                                info);
792                 if (IS_ERR(el)) {
793                         ret = PTR_ERR(el);
794                         goto err_free_elements;
795                 }
796                 el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
797                 el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
798                 el->sec_req = sec_req;
799                 list_add_tail(&el->head, &sec_req->elements);
800         }
801
802         /*
803          * Only attempt to queue if the whole lot can fit in the queue -
804          * we can't successfully clean up after a partial queueing so this
805          * must succeed or fail atomically.
806          *
807          * Big hammer test of both software and hardware queues - could be
808          * more refined but this is unlikely to happen so no need.
809          */
810
811         /* Grab a big lock for a long time to avoid concurrency issues */
812         mutex_lock(&queue->queuelock);
813
814         /*
815          * Can go on to queue if we have space in either:
816          * 1) The hardware queue and no software queue
817          * 2) The software queue
818                  * AND there is nothing in the backlog.  If there is a backlog we
819                  * can only add to the backlog list and return busy.
820          */
821         if ((!sec_queue_can_enqueue(queue, steps) &&
822              (!queue->havesoftqueue ||
823               kfifo_avail(&queue->softqueue) > steps)) ||
824             !list_empty(&ctx->backlog)) {
825                 ret = -EBUSY;
826                 if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
827                         list_add_tail(&sec_req->backlog_head, &ctx->backlog);
828                         mutex_unlock(&queue->queuelock);
829                         goto out;
830                 }
831
832                 mutex_unlock(&queue->queuelock);
833                 goto err_free_elements;
834         }
835         ret = sec_send_request(sec_req, queue);
836         mutex_unlock(&queue->queuelock);
837         if (ret)
838                 goto err_free_elements;
839
840         ret = -EINPROGRESS;
841 out:
842         /* Cleanup - all elements in pointer arrays have been copied */
843         kfree(splits_in_nents);
844         kfree(splits_in);
845         kfree(splits_out_nents);
846         kfree(splits_out);
847         kfree(split_sizes);
848         return ret;
849
850 err_free_elements:
851         list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
852                 list_del(&el->head);
853                 sec_alg_free_el(el, info);
854         }
855         if (crypto_skcipher_ivsize(atfm))
856                 dma_unmap_single(info->dev, sec_req->dma_iv,
857                                  crypto_skcipher_ivsize(atfm),
858                                  DMA_BIDIRECTIONAL);
859 err_unmap_out_sg:
860         if (split)
861                 sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
862                                     splits_out_nents, sec_req->len_out,
863                                     info->dev);
864 err_unmap_in_sg:
865         sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
866                             sec_req->len_in, info->dev);
867 err_free_split_sizes:
868         kfree(split_sizes);
869
870         return ret;
871 }
872
873 static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
874 {
875         return sec_alg_skcipher_crypto(req, true);
876 }
877
878 static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
879 {
880         return sec_alg_skcipher_crypto(req, false);
881 }
882
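/*
 * Set up the per-transform context and allocate a hardware queue for
 * it; this variant does not use a software queue.
 */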
883 static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
884 {
885         struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
886
887         mutex_init(&ctx->lock);
888         INIT_LIST_HEAD(&ctx->backlog);
889         crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
890
891         ctx->queue = sec_queue_alloc_start_safe();
892         if (IS_ERR(ctx->queue))
893                 return PTR_ERR(ctx->queue);
894
895         mutex_init(&ctx->queue->queuelock);
896         ctx->queue->havesoftqueue = false;
897
898         return 0;
899 }
900
901 static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
902 {
903         struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
904         struct device *dev = ctx->queue->dev_info->dev;
905
906         if (ctx->key) {
907                 memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
908                 dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
909                                   ctx->pkey);
910         }
911         sec_queue_stop_release(ctx->queue);
912 }
913
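/*
 * As sec_alg_skcipher_init() but also allocate a software queue.  The
 * software queue is used by the chaining modes (CBC, CTR) to preserve
 * ordering between elements that depend on the updated IV.
 */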
914 static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
915 {
916         struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
917         int ret;
918
919         ret = sec_alg_skcipher_init(tfm);
920         if (ret)
921                 return ret;
922
923         INIT_KFIFO(ctx->queue->softqueue);
924         ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
925         if (ret) {
926                 sec_alg_skcipher_exit(tfm);
927                 return ret;
928         }
929         ctx->queue->havesoftqueue = true;
930
931         return 0;
932 }
933
934 static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
935 {
936         struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
937
938         kfifo_free(&ctx->queue->softqueue);
939         sec_alg_skcipher_exit(tfm);
940 }
941
942 static struct skcipher_alg sec_algs[] = {
943         {
944                 .base = {
945                         .cra_name = "ecb(aes)",
946                         .cra_driver_name = "hisi_sec_aes_ecb",
947                         .cra_priority = 4001,
948                         .cra_flags = CRYPTO_ALG_ASYNC,
949                         .cra_blocksize = AES_BLOCK_SIZE,
950                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
951                         .cra_alignmask = 0,
952                         .cra_module = THIS_MODULE,
953                 },
954                 .init = sec_alg_skcipher_init,
955                 .exit = sec_alg_skcipher_exit,
956                 .setkey = sec_alg_skcipher_setkey_aes_ecb,
957                 .decrypt = sec_alg_skcipher_decrypt,
958                 .encrypt = sec_alg_skcipher_encrypt,
959                 .min_keysize = AES_MIN_KEY_SIZE,
960                 .max_keysize = AES_MAX_KEY_SIZE,
961                 .ivsize = 0,
962         }, {
963                 .base = {
964                         .cra_name = "cbc(aes)",
965                         .cra_driver_name = "hisi_sec_aes_cbc",
966                         .cra_priority = 4001,
967                         .cra_flags = CRYPTO_ALG_ASYNC,
968                         .cra_blocksize = AES_BLOCK_SIZE,
969                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
970                         .cra_alignmask = 0,
971                         .cra_module = THIS_MODULE,
972                 },
973                 .init = sec_alg_skcipher_init_with_queue,
974                 .exit = sec_alg_skcipher_exit_with_queue,
975                 .setkey = sec_alg_skcipher_setkey_aes_cbc,
976                 .decrypt = sec_alg_skcipher_decrypt,
977                 .encrypt = sec_alg_skcipher_encrypt,
978                 .min_keysize = AES_MIN_KEY_SIZE,
979                 .max_keysize = AES_MAX_KEY_SIZE,
980                 .ivsize = AES_BLOCK_SIZE,
981         }, {
982                 .base = {
983                         .cra_name = "ctr(aes)",
984                         .cra_driver_name = "hisi_sec_aes_ctr",
985                         .cra_priority = 4001,
986                         .cra_flags = CRYPTO_ALG_ASYNC,
987                         .cra_blocksize = AES_BLOCK_SIZE,
988                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
989                         .cra_alignmask = 0,
990                         .cra_module = THIS_MODULE,
991                 },
992                 .init = sec_alg_skcipher_init_with_queue,
993                 .exit = sec_alg_skcipher_exit_with_queue,
994                 .setkey = sec_alg_skcipher_setkey_aes_ctr,
995                 .decrypt = sec_alg_skcipher_decrypt,
996                 .encrypt = sec_alg_skcipher_encrypt,
997                 .min_keysize = AES_MIN_KEY_SIZE,
998                 .max_keysize = AES_MAX_KEY_SIZE,
999                 .ivsize = AES_BLOCK_SIZE,
1000         }, {
1001                 .base = {
1002                         .cra_name = "xts(aes)",
1003                         .cra_driver_name = "hisi_sec_aes_xts",
1004                         .cra_priority = 4001,
1005                         .cra_flags = CRYPTO_ALG_ASYNC,
1006                         .cra_blocksize = AES_BLOCK_SIZE,
1007                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
1008                         .cra_alignmask = 0,
1009                         .cra_module = THIS_MODULE,
1010                 },
1011                 .init = sec_alg_skcipher_init,
1012                 .exit = sec_alg_skcipher_exit,
1013                 .setkey = sec_alg_skcipher_setkey_aes_xts,
1014                 .decrypt = sec_alg_skcipher_decrypt,
1015                 .encrypt = sec_alg_skcipher_encrypt,
1016                 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1017                 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1018                 .ivsize = AES_BLOCK_SIZE,
1019         }, {
1020         /* Unable to find any test vectors so untested */
1021                 .base = {
1022                         .cra_name = "ecb(des)",
1023                         .cra_driver_name = "hisi_sec_des_ecb",
1024                         .cra_priority = 4001,
1025                         .cra_flags = CRYPTO_ALG_ASYNC,
1026                         .cra_blocksize = DES_BLOCK_SIZE,
1027                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
1028                         .cra_alignmask = 0,
1029                         .cra_module = THIS_MODULE,
1030                 },
1031                 .init = sec_alg_skcipher_init,
1032                 .exit = sec_alg_skcipher_exit,
1033                 .setkey = sec_alg_skcipher_setkey_des_ecb,
1034                 .decrypt = sec_alg_skcipher_decrypt,
1035                 .encrypt = sec_alg_skcipher_encrypt,
1036                 .min_keysize = DES_KEY_SIZE,
1037                 .max_keysize = DES_KEY_SIZE,
1038                 .ivsize = 0,
1039         }, {
1040                 .base = {
1041                         .cra_name = "cbc(des)",
1042                         .cra_driver_name = "hisi_sec_des_cbc",
1043                         .cra_priority = 4001,
1044                         .cra_flags = CRYPTO_ALG_ASYNC,
1045                         .cra_blocksize = DES_BLOCK_SIZE,
1046                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
1047                         .cra_alignmask = 0,
1048                         .cra_module = THIS_MODULE,
1049                 },
1050                 .init = sec_alg_skcipher_init_with_queue,
1051                 .exit = sec_alg_skcipher_exit_with_queue,
1052                 .setkey = sec_alg_skcipher_setkey_des_cbc,
1053                 .decrypt = sec_alg_skcipher_decrypt,
1054                 .encrypt = sec_alg_skcipher_encrypt,
1055                 .min_keysize = DES_KEY_SIZE,
1056                 .max_keysize = DES_KEY_SIZE,
1057                 .ivsize = DES_BLOCK_SIZE,
1058         }, {
1059                 .base = {
1060                         .cra_name = "cbc(des3_ede)",
1061                         .cra_driver_name = "hisi_sec_3des_cbc",
1062                         .cra_priority = 4001,
1063                         .cra_flags = CRYPTO_ALG_ASYNC,
1064                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1065                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
1066                         .cra_alignmask = 0,
1067                         .cra_module = THIS_MODULE,
1068                 },
1069                 .init = sec_alg_skcipher_init_with_queue,
1070                 .exit = sec_alg_skcipher_exit_with_queue,
1071                 .setkey = sec_alg_skcipher_setkey_3des_cbc,
1072                 .decrypt = sec_alg_skcipher_decrypt,
1073                 .encrypt = sec_alg_skcipher_encrypt,
1074                 .min_keysize = DES3_EDE_KEY_SIZE,
1075                 .max_keysize = DES3_EDE_KEY_SIZE,
1076                 .ivsize = DES3_EDE_BLOCK_SIZE,
1077         }, {
1078                 .base = {
1079                         .cra_name = "ecb(des3_ede)",
1080                         .cra_driver_name = "hisi_sec_3des_ecb",
1081                         .cra_priority = 4001,
1082                         .cra_flags = CRYPTO_ALG_ASYNC,
1083                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1084                         .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
1085                         .cra_alignmask = 0,
1086                         .cra_module = THIS_MODULE,
1087                 },
1088                 .init = sec_alg_skcipher_init,
1089                 .exit = sec_alg_skcipher_exit,
1090                 .setkey = sec_alg_skcipher_setkey_3des_ecb,
1091                 .decrypt = sec_alg_skcipher_decrypt,
1092                 .encrypt = sec_alg_skcipher_encrypt,
1093                 .min_keysize = DES3_EDE_KEY_SIZE,
1094                 .max_keysize = DES3_EDE_KEY_SIZE,
1095                 .ivsize = 0,
1096         }
1097 };
1098
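/*
 * Registration is reference counted: the algorithms are registered when
 * the first device appears and unregistered when the last one goes away.
 */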
1099 int sec_algs_register(void)
1100 {
1101         int ret = 0;
1102
1103         mutex_lock(&algs_lock);
1104         if (++active_devs != 1)
1105                 goto unlock;
1106
1107         ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
1108         if (ret)
1109                 --active_devs;
1110 unlock:
1111         mutex_unlock(&algs_lock);
1112
1113         return ret;
1114 }
1115
1116 void sec_algs_unregister(void)
1117 {
1118         mutex_lock(&algs_lock);
1119         if (--active_devs != 0)
1120                 goto unlock;
1121         crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
1122
1123 unlock:
1124         mutex_unlock(&algs_lock);
1125 }