/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
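
/*
 * Worked example of the state buffer sizing: AES_MAX_STATE_BUF_SIZE is
 * SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2) = 16 words, which
 * covers the largest layout above: an 8-word AES-256 key, the 4-word
 * hash key H and a 4-word IV.
 */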

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

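/*
 * Number of padding bytes needed to round len up to a multiple of
 * AES_BLOCK_SIZE, e.g. len = 20 needs 12 bytes of padding; block-aligned
 * lengths need none.
 */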
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

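/*
 * The command token and the transform record are mapped as one contiguous
 * DMA region; tfm_dma simply points sizeof(info->cmd) bytes past ct_dma.
 */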
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;
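
	/*
	 * CBC falls through to the IV write and the shared "ctr" tail below;
	 * CTR skips the IV write here because the counter IV is written per
	 * transfer in mtk_aes_ctr_transfer(); ECB needs neither the IV nor
	 * the extra command word.
	 */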
	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;

	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

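/*
 * Prepare the DMA scatterlists. If either the source or the destination
 * list is not suitably aligned, the data is bounced through the record's
 * pre-allocated buffer and padded up to a full AES block.
 */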
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

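/*
 * Enqueue a request on the record's queue. If the record is idle, dequeue
 * the next request, notify any backlogged request and hand the dequeued
 * request to the context's start() callback.
 */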
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}
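	/*
	 * Example: with start == 0xfffffffe and blocks == 4, end wraps around,
	 * so only the two blocks up to the 32-bit counter wrap (-start == 2,
	 * i.e. 32 bytes) are sent in this pass; the remainder follows in a
	 * further fragmented pass once the IV has been advanced below.
	 */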

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key in the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

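	/*
	 * Encryption requests are dispatched to record 0 (ring 0) and
	 * decryption requests to record 1 (ring 1), so both directions can
	 * be processed in parallel.
	 */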
	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we only need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

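	/*
	 * The transform record covers the AES key, the hash key H and the
	 * GCM IV, hence the extra AES_BLOCK_SIZE + ivsize words in the size.
	 */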
	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* TODO: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

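	/* GCM encryption is dispatched to record 1, decryption to record 0. */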
	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-compute the hash key H
 * (a string of 128 zero bits encrypted using the block cipher) for the
 * GHASH operation. The result of that encryption needs to be stored in the
 * transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

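/*
 * Ring interrupt handler: acknowledge the interrupt status, reset the
 * processed-descriptor count, re-arm the threshold and defer completion
 * handling to the done tasklet.
 */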
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel, which improves performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}