// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
        cc_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1, K2, K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

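/* Sanity check on the AAD length; the 16- and 20-byte cases presumably
 * correspond to an IPsec ESP header plus IV, without and with an extended
 * sequence number.
 */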
static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

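/* One-time transform setup: copy the algorithm's cipher/flow/auth modes into
 * the context and allocate the DMA-coherent key buffers the chosen auth mode
 * needs (K1..K3 for XCBC, ipad/opad digests plus a padded authkey for HMAC).
 */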
static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}

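/* Completion callback, presumably invoked from the request manager. On
 * decrypt it compares the MAC computed by the engine against the received
 * ICV and zeroes the output on mismatch; on encrypt it gathers a fragmented
 * ICV and copies back any generated IV.
 */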
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* In case of payload authentication failure, we must
                         * not reveal the decrypted message, so zero its
                         * memory.
                         */
                        cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
                        err = -EBADMSG;
                }
        } else { /*ENCRYPT*/
                if (areq_ctx->is_icv_fragmented) {
                        u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                        cc_copy_sg_portion(dev, areq_ctx->mac_buf,
                                           areq_ctx->dst_sgl, skip,
                                           (skip + ctx->authsize),
                                           CC_SG_FROM_BUF);
                }

                /* If an IV was generated, copy it back to the user provided
                 * buffer.
                 */
                if (areq_ctx->backup_giv) {
                        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
                                       CTR_RFC3686_NONCE_SIZE,
                                       CTR_RFC3686_IV_SIZE);
                        else if (ctx->cipher_mode == DRV_CIPHER_CCM)
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
                                       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
                }
        }
done:
        aead_request_complete(areq, err);
}

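/* Build the descriptor sequence deriving the three AES-XCBC-MAC subkeys
 * K1, K2 and K3 by encrypting the constants 0x01..01, 0x02..02 and 0x03..03
 * with the user key (per RFC 3566). Returns the number of descriptors used.
 */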
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* Use the same buffer for the source/user key as for the output
         * keys, because after this key load the user key is not needed
         * anymore.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

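/* Build the descriptor sequence precomputing the two intermediate HMAC
 * digests, hash(key XOR ipad) and hash(key XOR opad), written back-to-back
 * into the ipad_opad buffer. Returns the number of descriptors used.
 */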
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_err(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All tests of keys sizes passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copied to an internal buffer, or hashed first if the key is
 * longer than the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {
                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        return rc;
}

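/* For authenc() algorithms the key blob is rtattr-encoded (an authenc key
 * param holding enckeylen, followed by the auth key and then the enc key);
 * parse it, stash the key material in the context and run the setkey
 * sequence on the hardware.
 */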
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct rtattr *rta = (struct rtattr *)key;
        struct cc_crypto_req cc_req = {};
        struct crypto_authenc_key_param *param;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        int rc = -EINVAL;
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                if (!RTA_OK(rta, keylen))
                        goto badkey;
                if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                        goto badkey;
                if (RTA_PAYLOAD(rta) < sizeof(*param))
                        goto badkey;
                param = RTA_DATA(rta);
                ctx->enc_keylen = be32_to_cpu(param->enckeylen);
                key += RTA_ALIGN(rta->rta_len);
                keylen -= RTA_ALIGN(rta->rta_len);
                if (keylen < ctx->enc_keylen)
                        goto badkey;
                ctx->auth_keylen = keylen - ctx->enc_keylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in the last bytes of the key */
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                goto badkey;
                        /* Copy nonce from last 4 bytes in CTR key to
                         * first 4 bytes in CTR IV
                         */
                        memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
                               ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
                               CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                goto badkey;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
                if (rc)
                        goto badkey;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                rc = -ENOTSUPP;
                goto badkey;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto setkey_error;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
        return rc;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

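/* Emit one descriptor that feeds the associated data into the given flow,
 * covering both the direct (DLLI) and the scatter-gather (MLLI) DMA buffer
 * types.
 */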
static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default):
                 * assoc. + IV + data compacted into one table.
                 * If assoclen is zero, only the IV is processed.
                 */
                cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /*null processing*/

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /*Decrypt*/
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /*null processing*/

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for DMA to write all the ciphertext */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (req->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

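/* If any buffer is described by an MLLI table (or a double-pass flow is
 * used), emit a BYPASS descriptor that copies the table from host memory
 * into its reserved SRAM slot before the data descriptors reference it.
 */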
static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        (unsigned int)ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

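/* Pick the flow mode for the data-processing descriptors: single-pass flows
 * route the cipher output straight into the hash engine, while double-pass
 * flows use plain cipher-in/cipher-out and hash the data separately.
 */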
static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
                                          enum cc_flow_mode setup_flow_mode,
                                          bool is_single_pass)
{
        enum cc_flow_mode data_flow_mode;

        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_to_HASH_and_DOUT : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_to_HASH_and_DOUT : DIN_DES_DOUT;
        } else { /* Decrypt */
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_and_HASH : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_and_HASH : DIN_DES_DOUT;
        }

        return data_flow_mode;
}

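/* Top-level descriptor builder for HMAC-based authenc: a single pass handles
 * auth and cipher in one sweep; otherwise encrypt-then-authenticate (or
 * authenticate-then-decrypt), reading the digest last.
 */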
static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
                            unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int data_flow_mode =
                cc_get_data_flow(direct, ctx->flow_mode,
                                 req_ctx->is_single_pass);

        if (req_ctx->is_single_pass) {
                /**
                 * Single-pass flow
                 */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_set_cipher_desc(req, desc, seq_size);
                cc_proc_header_desc(req, desc, seq_size);
                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);
                return;
        }

        /**
         * Double-pass flow
         * Fallback for unsupported single-pass modes,
         * i.e. when the assoc. data length is not a multiple of a word.
         */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* encrypt first.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* authenc after.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);

        } else { /*DECRYPT*/
                /* authenc first.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                /* decrypt after.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* Reading the digest result with the completion bit set
                 * must come after the cipher operation.
                 */
1256                 cc_proc_digest_desc(req, desc, seq_size);
1257         }
1258 }
1259
1260 static void
1261 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1262                 unsigned int *seq_size)
1263 {
1264         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1265         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1266         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1267         int direct = req_ctx->gen_ctx.op_type;
1268         unsigned int data_flow_mode =
1269                 cc_get_data_flow(direct, ctx->flow_mode,
1270                                  req_ctx->is_single_pass);
1271
1272         if (req_ctx->is_single_pass) {
1273                 /*
1274                  * Single-pass flow
1275                  */
1276                 cc_set_xcbc_desc(req, desc, seq_size);
1277                 cc_set_cipher_desc(req, desc, seq_size);
1278                 cc_proc_header_desc(req, desc, seq_size);
1279                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1280                 cc_proc_digest_desc(req, desc, seq_size);
1281                 return;
1282         }
1283
1284         /*
1285          * Double-pass flow: fallback for modes that cannot use the
1286          * single-pass flow, e.g. when the associated data length is
1287          * not a multiple of a 32-bit word.
1288          */
1289         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1290                 /* encrypt first.. */
1291                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1292                 /* authenc after.. */
1293                 cc_set_xcbc_desc(req, desc, seq_size);
1294                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1295                 cc_proc_digest_desc(req, desc, seq_size);
1296         } else { /*DECRYPT*/
1297                 /* authenc first.. */
1298                 cc_set_xcbc_desc(req, desc, seq_size);
1299                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1300                 /* decrypt after..*/
1301                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1302                 /* Read the digest result with the completion bit set;
1303                  * this must come after the cipher operation.
1304                  */
1305                 cc_proc_digest_desc(req, desc, seq_size);
1306         }
1307 }
1308
1309 static int validate_data_size(struct cc_aead_ctx *ctx,
1310                               enum drv_crypto_direction direct,
1311                               struct aead_request *req)
1312 {
1313         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1314         struct device *dev = drvdata_to_dev(ctx->drvdata);
1315         unsigned int assoclen = req->assoclen;
1316         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1317                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1318
1319         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1320             req->cryptlen < ctx->authsize)
1321                 goto data_size_err;
1322
1323         areq_ctx->is_single_pass = true; /* default to the fast flow */
1324
1325         switch (ctx->flow_mode) {
1326         case S_DIN_to_AES:
1327                 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1328                     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1329                         goto data_size_err;
1330                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1331                         break;
1332                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1333                         if (areq_ctx->plaintext_authenticate_only)
1334                                 areq_ctx->is_single_pass = false;
1335                         break;
1336                 }
1337
1338                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1339                         areq_ctx->is_single_pass = false;
1340
1341                 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1342                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1343                         areq_ctx->is_single_pass = false;
1344
1345                 break;
1346         case S_DIN_to_DES:
1347                 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1348                         goto data_size_err;
1349                 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1350                         areq_ctx->is_single_pass = false;
1351                 break;
1352         default:
1353                 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1354                 goto data_size_err;
1355         }
1356
1357         return 0;
1358
1359 data_size_err:
1360         return -EINVAL;
1361 }
1362
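/*
 * Format the CCM A0 header encoding l(a), the associated-data length, per
 * RFC 3610 / NIST SP 800-38C: a 2-byte big-endian encoding for lengths
 * below 2^16 - 2^8, otherwise the 0xFF 0xFE marker followed by a 4-byte
 * big-endian length. Returns the number of header bytes written.
 */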
1363 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1364 {
1365         unsigned int len = 0;
1366
1367         if (header_size == 0)
1368                 return 0;
1369
1370         if (header_size < ((1UL << 16) - (1UL << 8))) {
1371                 len = 2;
1372
1373                 pa0_buff[0] = (header_size >> 8) & 0xFF;
1374                 pa0_buff[1] = header_size & 0xFF;
1375         } else {
1376                 len = 6;
1377
1378                 pa0_buff[0] = 0xFF;
1379                 pa0_buff[1] = 0xFE;
1380                 pa0_buff[2] = (header_size >> 24) & 0xFF;
1381                 pa0_buff[3] = (header_size >> 16) & 0xFF;
1382                 pa0_buff[4] = (header_size >> 8) & 0xFF;
1383                 pa0_buff[5] = header_size & 0xFF;
1384         }
1385
1386         return len;
1387 }
1388
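/*
 * Encode msglen big-endian into the last csize bytes of the CCM B0 block,
 * returning -EOVERFLOW if the length does not fit in the field.
 */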
1389 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1390 {
1391         __be32 data;
1392
1393         memset(block, 0, csize);
1394         block += csize;
1395
1396         if (csize >= 4)
1397                 csize = 4;
1398         else if (msglen > (1 << (8 * csize)))
1399                 return -EOVERFLOW;
1400
1401         data = cpu_to_be32(msglen);
1402         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1403
1404         return 0;
1405 }
1406
1407 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1408                   unsigned int *seq_size)
1409 {
1410         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1411         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1412         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1413         unsigned int idx = *seq_size;
1414         unsigned int cipher_flow_mode;
1415         dma_addr_t mac_result;
1416
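        /* On decrypt the MAC is computed into mac_buf so it can later be
         * compared with the received ICV; on encrypt it is written directly
         * to the ICV location.
         */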
1417         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1418                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1419                 mac_result = req_ctx->mac_buf_dma_addr;
1420         } else { /* Encrypt */
1421                 cipher_flow_mode = AES_and_HASH;
1422                 mac_result = req_ctx->icv_dma_addr;
1423         }
1424
1425         /* load key */
1426         hw_desc_init(&desc[idx]);
1427         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1428         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1429                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1430                       ctx->enc_keylen), NS_BIT);
1431         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1432         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1433         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1434         set_flow_mode(&desc[idx], S_DIN_to_AES);
1435         idx++;
1436
1437         /* load ctr state */
1438         hw_desc_init(&desc[idx]);
1439         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1440         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1441         set_din_type(&desc[idx], DMA_DLLI,
1442                      req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1443         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1444         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1445         set_flow_mode(&desc[idx], S_DIN_to_AES);
1446         idx++;
1447
1448         /* load MAC key */
1449         hw_desc_init(&desc[idx]);
1450         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1451         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1452                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1453                       ctx->enc_keylen), NS_BIT);
1454         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1455         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1456         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1457         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1458         set_aes_not_hash_mode(&desc[idx]);
1459         idx++;
1460
1461         /* load MAC state */
1462         hw_desc_init(&desc[idx]);
1463         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1464         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1465         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1466                      AES_BLOCK_SIZE, NS_BIT);
1467         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1468         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1469         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1470         set_aes_not_hash_mode(&desc[idx]);
1471         idx++;
1472
1473         /* process assoc data */
1474         if (req->assoclen > 0) {
1475                 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1476         } else {
1477                 hw_desc_init(&desc[idx]);
1478                 set_din_type(&desc[idx], DMA_DLLI,
1479                              sg_dma_address(&req_ctx->ccm_adata_sg),
1480                              AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1481                 set_flow_mode(&desc[idx], DIN_HASH);
1482                 idx++;
1483         }
1484
1485         /* process the cipher */
1486         if (req_ctx->cryptlen)
1487                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1488
1489         /* Read the intermediate MAC */
1490         hw_desc_init(&desc[idx]);
1491         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1492         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1493                       NS_BIT, 0);
1494         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1495         set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1496         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1497         set_aes_not_hash_mode(&desc[idx]);
1498         idx++;
1499
1500         /* load AES-CTR state (for the final MAC calculation) */
1501         hw_desc_init(&desc[idx]);
1502         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1503         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1504         set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1505                      AES_BLOCK_SIZE, NS_BIT);
1506         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1507         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1508         set_flow_mode(&desc[idx], S_DIN_to_AES);
1509         idx++;
1510
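        /* Memory Barrier */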
1511         hw_desc_init(&desc[idx]);
1512         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1513         set_dout_no_dma(&desc[idx], 0, 0, 1);
1514         idx++;
1515
1516         /* encrypt the "T" value and store MAC in mac_state */
1517         hw_desc_init(&desc[idx]);
1518         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1519                      ctx->authsize, NS_BIT);
1520         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1521         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1522         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1523         idx++;
1524
1525         *seq_size = idx;
1526         return 0;
1527 }
1528
1529 static int config_ccm_adata(struct aead_request *req)
1530 {
1531         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1532         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1533         struct device *dev = drvdata_to_dev(ctx->drvdata);
1534         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1536         unsigned int lp = req->iv[0];
1537         /* Note: The code assumes that req->iv[0] already contains the
1538          * value of L' of RFC 3610.
1539          */
1540         unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1541         unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1542         u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1543         u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1544         u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1545         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1546                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1547                                 req->cryptlen :
1548                                 (req->cryptlen - ctx->authsize);
1549         int rc;
1550
1551         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1552         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1553
1554         /* taken from crypto/ccm.c */
1555         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1556         if (l < 2 || l > 8) {
1557                 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1558                 return -EINVAL;
1559         }
1560         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1561
1562         /* format control info per RFC 3610 and
1563          * NIST Special Publication 800-38C
1564          */
1565         *b0 |= (8 * ((m - 2) / 2));
1566         if (req->assoclen > 0)
1567                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1568
1569         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m), the message length. */
1570         if (rc) {
1571                 dev_err(dev, "message len overflow detected\n");
1572                 return rc;
1573         }
1574         /* END of "taken from crypto/ccm.c" */
1575
1576         /* l(a) - size of associated data. */
1577         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1578
1579         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1580         req->iv[15] = 1;
1581
1582         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1583         ctr_count_0[15] = 0;
1584
1585         return 0;
1586 }
1587
1588 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1589 {
1590         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1591         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1592         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1593
1594         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1595
1596         /* For RFC 4309, always use 4 bytes for the message length
1597          * (at most 2^32-1 bytes), i.e. L = 4 and L' = 3.
1598          */
1599         areq_ctx->ctr_iv[0] = 3;
1600
1601         /* In RFC 4309 there is an 11-byte nonce+IV part
1602          * (3-byte nonce + 8-byte IV) that we build here.
1603          */
1604         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1605                CCM_BLOCK_NONCE_SIZE);
1606         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1607                CCM_BLOCK_IV_SIZE);
1608         req->iv = areq_ctx->ctr_iv;
1609         req->assoclen -= CCM_BLOCK_IV_SIZE;
1610 }
1611
1612 static void cc_set_ghash_desc(struct aead_request *req,
1613                               struct cc_hw_desc desc[], unsigned int *seq_size)
1614 {
1615         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1616         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1617         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1618         unsigned int idx = *seq_size;
1619
1620         /* load key to AES */
1621         hw_desc_init(&desc[idx]);
1622         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1623         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1624         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1625                      ctx->enc_keylen, NS_BIT);
1626         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1627         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1628         set_flow_mode(&desc[idx], S_DIN_to_AES);
1629         idx++;
1630
1631         /* process one zero block to generate hkey (hash subkey H = E_K(0)) */
1632         hw_desc_init(&desc[idx]);
1633         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1634         set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1635                       NS_BIT, 0);
1636         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1637         idx++;
1638
1639         /* Memory Barrier */
1640         hw_desc_init(&desc[idx]);
1641         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1642         set_dout_no_dma(&desc[idx], 0, 0, 1);
1643         idx++;
1644
1645         /* Load GHASH subkey */
1646         hw_desc_init(&desc[idx]);
1647         set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1648                      AES_BLOCK_SIZE, NS_BIT);
1649         set_dout_no_dma(&desc[idx], 0, 0, 1);
1650         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1651         set_aes_not_hash_mode(&desc[idx]);
1652         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1653         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1654         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1655         idx++;
1656
1657         /* Configure the hash engine to work with GHASH.
1658          * Since it was not possible to extend the HASH submodes to add
1659          * GHASH, the following command is necessary in order to select
1660          * GHASH (according to the HW designers).
1661          */
1662         hw_desc_init(&desc[idx]);
1663         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1664         set_dout_no_dma(&desc[idx], 0, 0, 1);
1665         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1666         set_aes_not_hash_mode(&desc[idx]);
1667         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1668         set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1669         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1670         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1671         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1672         idx++;
1673
1674         /* Load the GHASH initial STATE (which is 0); for any hash there
1675          * is an initial state.
1676          */
1677         hw_desc_init(&desc[idx]);
1678         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1679         set_dout_no_dma(&desc[idx], 0, 0, 1);
1680         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1681         set_aes_not_hash_mode(&desc[idx]);
1682         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1683         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1684         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1685         idx++;
1686
1687         *seq_size = idx;
1688 }
1689
1690 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1691                              unsigned int *seq_size)
1692 {
1693         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1694         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1695         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1696         unsigned int idx = *seq_size;
1697
1698         /* load key to AES */
1699         hw_desc_init(&desc[idx]);
1700         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1701         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1702         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1703                      ctx->enc_keylen, NS_BIT);
1704         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1705         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1706         set_flow_mode(&desc[idx], S_DIN_to_AES);
1707         idx++;
1708
1709         if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1710                 /* load the initial AES/CTR counter value, incremented by 2 */
1711                 hw_desc_init(&desc[idx]);
1712                 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1713                 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1714                 set_din_type(&desc[idx], DMA_DLLI,
1715                              req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1716                              NS_BIT);
1717                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1718                 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1719                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1720                 idx++;
1721         }
1722
1723         *seq_size = idx;
1724 }
1725
1726 static void cc_proc_gcm_result(struct aead_request *req,
1727                                struct cc_hw_desc desc[],
1728                                unsigned int *seq_size)
1729 {
1730         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1731         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1732         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1733         dma_addr_t mac_result;
1734         unsigned int idx = *seq_size;
1735
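        /* As in CCM: decrypt computes the MAC into mac_buf for comparison
         * with the received ICV, while encrypt writes it straight to the
         * ICV location.
         */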
1736         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1737                 mac_result = req_ctx->mac_buf_dma_addr;
1738         } else { /* Encrypt */
1739                 mac_result = req_ctx->icv_dma_addr;
1740         }
1741
1742         /* process (GHASH) the length block: len(A) || len(C), in bits */
1743         hw_desc_init(&desc[idx]);
1744         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1745                      AES_BLOCK_SIZE, NS_BIT);
1746         set_flow_mode(&desc[idx], DIN_HASH);
1747         idx++;
1748
1749         /* Store the GHASH state after GHASH(assoc. data + cipher + len block) */
1750         hw_desc_init(&desc[idx]);
1751         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1752         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1753         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1754                       NS_BIT, 0);
1755         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1756         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1757         set_aes_not_hash_mode(&desc[idx]);
1758
1759         idx++;
1760
1761         /* load the initial AES/CTR counter value, incremented by 1 */
1762         hw_desc_init(&desc[idx]);
1763         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1764         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1765         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1766                      AES_BLOCK_SIZE, NS_BIT);
1767         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1768         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1769         set_flow_mode(&desc[idx], S_DIN_to_AES);
1770         idx++;
1771
1772         /* Memory Barrier */
1773         hw_desc_init(&desc[idx]);
1774         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1775         set_dout_no_dma(&desc[idx], 0, 0, 1);
1776         idx++;
1777
1778         /* process GCTR on the stored GHASH and store the MAC in mac_state */
1779         hw_desc_init(&desc[idx]);
1780         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1781         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1782                      AES_BLOCK_SIZE, NS_BIT);
1783         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1784         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1785         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1786         idx++;
1787
1788         *seq_size = idx;
1789 }
1790
1791 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1792                   unsigned int *seq_size)
1793 {
1794         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1795         unsigned int cipher_flow_mode;
1796
1797         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1798                 cipher_flow_mode = AES_and_HASH;
1799         } else { /* Encrypt */
1800                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1801         }
1802
1803         /* In RFC 4543 there is no data to encrypt; just copy src to dst */
1804         if (req_ctx->plaintext_authenticate_only) {
1805                 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1806                 cc_set_ghash_desc(req, desc, seq_size);
1807                 /* process(ghash) assoc data */
1808                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1809                 cc_set_gctr_desc(req, desc, seq_size);
1810                 cc_proc_gcm_result(req, desc, seq_size);
1811                 return 0;
1812         }
1813
1814         /* For GCM and RFC 4106 */
1815         cc_set_ghash_desc(req, desc, seq_size);
1816         /* process(ghash) assoc data */
1817         if (req->assoclen > 0)
1818                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1819         cc_set_gctr_desc(req, desc, seq_size);
1820         /* process(gctr+ghash) */
1821         if (req_ctx->cryptlen)
1822                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1823         cc_proc_gcm_result(req, desc, seq_size);
1824
1825         return 0;
1826 }
1827
1828 static int config_gcm_context(struct aead_request *req)
1829 {
1830         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1831         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1832         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1833         struct device *dev = drvdata_to_dev(ctx->drvdata);
1834
1835         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1836                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1837                                 req->cryptlen :
1838                                 (req->cryptlen - ctx->authsize);
1839         __be32 counter = cpu_to_be32(2);
1840
1841         dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d, ctx->authsize = %d\n",
1842                 __func__, cryptlen, req->assoclen, ctx->authsize);
1843
1844         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1845
1846         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1847
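        /* With a 96-bit IV, GCM's J0 = IV || 0^31 || 1; the data pass uses
         * inc32(J0) (counter = 2, gcm_iv_inc2) and the final tag encryption
         * uses J0 itself (counter = 1, gcm_iv_inc1).
         */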
1848         memcpy(req->iv + 12, &counter, 4);
1849         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1850
1851         counter = cpu_to_be32(1);
1852         memcpy(req->iv + 12, &counter, 4);
1853         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1854
1855         if (!req_ctx->plaintext_authenticate_only) {
1856                 __be64 temp64;
1857
1858                 temp64 = cpu_to_be64(req->assoclen * 8);
1859                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1860                 temp64 = cpu_to_be64(cryptlen * 8);
1861                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, sizeof(temp64));
1862         } else {
1863                 /* RFC 4543: all data (AAD, IV, plaintext) is considered
1864                  * additional authenticated data; nothing is encrypted.
1865                  */
1866                 __be64 temp64;
1867
1868                 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1869                                       cryptlen) * 8);
1870                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871                 temp64 = 0;
1872                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, sizeof(temp64));
1873         }
1874
1875         return 0;
1876 }
1877
1878 static void cc_proc_rfc4_gcm(struct aead_request *req)
1879 {
1880         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1883
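        /* Build the 12-byte GCM IV from the 4-byte per-key nonce (salt)
         * followed by the 8-byte per-request IV, per RFC 4106/4543.
         */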
1884         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885                ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887                GCM_BLOCK_RFC4_IV_SIZE);
1888         req->iv = areq_ctx->ctr_iv;
1889         req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1890 }
1891
1892 static int cc_proc_aead(struct aead_request *req,
1893                         enum drv_crypto_direction direct)
1894 {
1895         int rc = 0;
1896         int seq_len = 0;
1897         struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1898         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1899         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1900         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1901         struct device *dev = drvdata_to_dev(ctx->drvdata);
1902         struct cc_crypto_req cc_req = {};
1903
1904         dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1905                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1906                 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1907                 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1908
1909         /* STAT_PHASE_0: Init and sanity checks */
1910
1911         /* Check data length according to mode */
1912         if (validate_data_size(ctx, direct, req)) {
1913                 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1914                         req->cryptlen, req->assoclen);
1915                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1916                 return -EINVAL;
1917         }
1918
1919         /* Setup request structure */
1920         cc_req.user_cb = (void *)cc_aead_complete;
1921         cc_req.user_arg = (void *)req;
1922
1923         /* Setup request context */
1924         areq_ctx->gen_ctx.op_type = direct;
1925         areq_ctx->req_authsize = ctx->authsize;
1926         areq_ctx->cipher_mode = ctx->cipher_mode;
1927
1928         /* STAT_PHASE_1: Map buffers */
1929
1930         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1931                 /* Build the CTR IV: copy the nonce from the last 4 bytes
1932                  * of the CTR key into the first 4 bytes of the CTR IV.
1933                  */
1934                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1935                        CTR_RFC3686_NONCE_SIZE);
1936                 if (!areq_ctx->backup_giv) /* user-provided (not generated) IV */
1937                         memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1938                                req->iv, CTR_RFC3686_IV_SIZE);
1939                 /* Initialize counter portion of counter block */
1940                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1941                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1942
1943                 /* Replace with counter iv */
1944                 req->iv = areq_ctx->ctr_iv;
1945                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1946         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1947                    (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1948                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1949                 if (areq_ctx->ctr_iv != req->iv) {
1950                         memcpy(areq_ctx->ctr_iv, req->iv,
1951                                crypto_aead_ivsize(tfm));
1952                         req->iv = areq_ctx->ctr_iv;
1953                 }
1954         } else {
1955                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1956         }
1957
1958         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1959                 rc = config_ccm_adata(req);
1960                 if (rc) {
1961                         dev_dbg(dev, "config_ccm_adata() failed: %d\n",
1962                                 rc);
1963                         goto exit;
1964                 }
1965         } else {
1966                 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1967         }
1968
1969         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1970                 rc = config_gcm_context(req);
1971                 if (rc) {
1972                         dev_dbg(dev, "config_gcm_context() failed: %d\n",
1973                                 rc);
1974                         goto exit;
1975                 }
1976         }
1977
1978         rc = cc_map_aead_request(ctx->drvdata, req);
1979         if (rc) {
1980                 dev_err(dev, "map_request() failed\n");
1981                 goto exit;
1982         }
1983
1984         /* do we need to generate IV? */
1985         if (areq_ctx->backup_giv) {
1986                 /* set the DMA-mapped IV address */
1987                 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1988                         cc_req.ivgen_dma_addr[0] =
1989                                 areq_ctx->gen_ctx.iv_dma_addr +
1990                                 CTR_RFC3686_NONCE_SIZE;
1991                         cc_req.ivgen_dma_addr_len = 1;
1992                 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1993                         /* In CCM, the IV needs to exist both inside B0 and
1994                          * inside the counter. It is also copied to iv_dma_addr
1995                          * for other reasons (like returning it to the user),
1996                          * so three (identical) IV outputs are used.
1997                          */
1998                         cc_req.ivgen_dma_addr[0] =
1999                                 areq_ctx->gen_ctx.iv_dma_addr +
2000                                 CCM_BLOCK_IV_OFFSET;
2001                         cc_req.ivgen_dma_addr[1] =
2002                                 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2003                                 CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2004                         cc_req.ivgen_dma_addr[2] =
2005                                 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2006                                 CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2007                         cc_req.ivgen_dma_addr_len = 3;
2008                 } else {
2009                         cc_req.ivgen_dma_addr[0] =
2010                                 areq_ctx->gen_ctx.iv_dma_addr;
2011                         cc_req.ivgen_dma_addr_len = 1;
2012                 }
2013
2014                 /* set the IV size (8 or 16 bytes) */
2015                 cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2016         }
2017
2018         /* STAT_PHASE_2: Create sequence */
2019
2020         /* Load MLLI tables to SRAM if necessary */
2021         cc_mlli_to_sram(req, desc, &seq_len);
2022
2023         /* TODO: move seq_len by reference */
2024         switch (ctx->auth_mode) {
2025         case DRV_HASH_SHA1:
2026         case DRV_HASH_SHA256:
2027                 cc_hmac_authenc(req, desc, &seq_len);
2028                 break;
2029         case DRV_HASH_XCBC_MAC:
2030                 cc_xcbc_authenc(req, desc, &seq_len);
2031                 break;
2032         case DRV_HASH_NULL:
2033                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2034                         cc_ccm(req, desc, &seq_len);
2035                 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2036                         cc_gcm(req, desc, &seq_len);
2037                 break;
2038         default:
2039                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2040                 cc_unmap_aead_request(dev, req);
2041                 rc = -ENOTSUPP;
2042                 goto exit;
2043         }
2044
2045         /* STAT_PHASE_3: Lock HW and push sequence */
2046
2047         rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2048
2049         if (rc != -EINPROGRESS && rc != -EBUSY) {
2050                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2051                 cc_unmap_aead_request(dev, req);
2052         }
2053
2054 exit:
2055         return rc;
2056 }
2057
2058 static int cc_aead_encrypt(struct aead_request *req)
2059 {
2060         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2061         int rc;
2062
2063         /* No generated IV required */
2064         areq_ctx->backup_iv = req->iv;
2065         areq_ctx->backup_giv = NULL;
2066         areq_ctx->is_gcm4543 = false;
2067
2068         areq_ctx->plaintext_authenticate_only = false;
2069
2070         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2071         if (rc != -EINPROGRESS && rc != -EBUSY)
2072                 req->iv = areq_ctx->backup_iv;
2073
2074         return rc;
2075 }
2076
2077 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2078 {
2079         /* Very similar to cc_aead_encrypt() above. */
2080
2081         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2082         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2083         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2084         struct device *dev = drvdata_to_dev(ctx->drvdata);
2085         int rc = -EINVAL;
2086
2087         if (!valid_assoclen(req)) {
2088                 dev_err(dev, "invalid assoclen: %u\n", req->assoclen);
2089                 goto out;
2090         }
2091
2092         /* No generated IV required */
2093         areq_ctx->backup_iv = req->iv;
2094         areq_ctx->backup_giv = NULL;
2095         areq_ctx->is_gcm4543 = true;
2096
2097         cc_proc_rfc4309_ccm(req);
2098
2099         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2100         if (rc != -EINPROGRESS && rc != -EBUSY)
2101                 req->iv = areq_ctx->backup_iv;
2102 out:
2103         return rc;
2104 }
2105
2106 static int cc_aead_decrypt(struct aead_request *req)
2107 {
2108         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2109         int rc;
2110
2111         /* No generated IV required */
2112         areq_ctx->backup_iv = req->iv;
2113         areq_ctx->backup_giv = NULL;
2114         areq_ctx->is_gcm4543 = false;
2115
2116         areq_ctx->plaintext_authenticate_only = false;
2117
2118         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2119         if (rc != -EINPROGRESS && rc != -EBUSY)
2120                 req->iv = areq_ctx->backup_iv;
2121
2122         return rc;
2123 }
2124
2125 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2126 {
2127         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2128         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2129         struct device *dev = drvdata_to_dev(ctx->drvdata);
2130         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2131         int rc = -EINVAL;
2132
2133         if (!valid_assoclen(req)) {
2134                 dev_err(dev, "invalid assoclen: %u\n", req->assoclen);
2135                 goto out;
2136         }
2137
2138         /* No generated IV required */
2139         areq_ctx->backup_iv = req->iv;
2140         areq_ctx->backup_giv = NULL;
2141
2142         areq_ctx->is_gcm4543 = true;
2143         cc_proc_rfc4309_ccm(req);
2144
2145         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2146         if (rc != -EINPROGRESS && rc != -EBUSY)
2147                 req->iv = areq_ctx->backup_iv;
2148
2149 out:
2150         return rc;
2151 }
2152
2153 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2154                                  unsigned int keylen)
2155 {
2156         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2157         struct device *dev = drvdata_to_dev(ctx->drvdata);
2158
2159         dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2160
2161         if (keylen < 4)
2162                 return -EINVAL;
2163
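        /* The last 4 key bytes are the RFC 4106 nonce (salt), not part of
         * the AES key.
         */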
2164         keylen -= 4;
2165         memcpy(ctx->ctr_nonce, key + keylen, 4);
2166
2167         return cc_aead_setkey(tfm, key, keylen);
2168 }
2169
2170 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2171                                  unsigned int keylen)
2172 {
2173         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2174         struct device *dev = drvdata_to_dev(ctx->drvdata);
2175
2176         dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2177
2178         if (keylen < 4)
2179                 return -EINVAL;
2180
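        /* As for RFC 4106: the trailing 4 key bytes are the RFC 4543
         * nonce (salt), not part of the AES key.
         */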
2181         keylen -= 4;
2182         memcpy(ctx->ctr_nonce, key + keylen, 4);
2183
2184         return cc_aead_setkey(tfm, key, keylen);
2185 }
2186
2187 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2188                               unsigned int authsize)
2189 {
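        /* Tag lengths permitted for GCM by NIST SP 800-38D: 96 to 128 bits,
         * plus the truncated 64- and 32-bit variants.
         */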
2190         switch (authsize) {
2191         case 4:
2192         case 8:
2193         case 12:
2194         case 13:
2195         case 14:
2196         case 15:
2197         case 16:
2198                 break;
2199         default:
2200                 return -EINVAL;
2201         }
2202
2203         return cc_aead_setauthsize(authenc, authsize);
2204 }
2205
2206 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2207                                       unsigned int authsize)
2208 {
2209         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2210         struct device *dev = drvdata_to_dev(ctx->drvdata);
2211
2212         dev_dbg(dev, "authsize %d\n", authsize);
2213
2214         switch (authsize) {
2215         case 8:
2216         case 12:
2217         case 16:
2218                 break;
2219         default:
2220                 return -EINVAL;
2221         }
2222
2223         return cc_aead_setauthsize(authenc, authsize);
2224 }
2225
2226 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2227                                       unsigned int authsize)
2228 {
2229         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2230         struct device *dev = drvdata_to_dev(ctx->drvdata);
2231
2232         dev_dbg(dev, "authsize %d\n", authsize);
2233
2234         if (authsize != 16)
2235                 return -EINVAL;
2236
2237         return cc_aead_setauthsize(authenc, authsize);
2238 }
2239
2240 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2241 {
2242         /* Very similar to cc_aead_encrypt() above. */
2243
2244         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2245         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2246         struct device *dev = drvdata_to_dev(ctx->drvdata);
2247         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2248         int rc = -EINVAL;
2249
2250         if (!valid_assoclen(req)) {
2251                 dev_err(dev, "invalid assoclen: %u\n", req->assoclen);
2252                 goto out;
2253         }
2254
2255         /* No generated IV required */
2256         areq_ctx->backup_iv = req->iv;
2257         areq_ctx->backup_giv = NULL;
2258
2259         areq_ctx->plaintext_authenticate_only = false;
2260
2261         cc_proc_rfc4_gcm(req);
2262         areq_ctx->is_gcm4543 = true;
2263
2264         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2265         if (rc != -EINPROGRESS && rc != -EBUSY)
2266                 req->iv = areq_ctx->backup_iv;
2267 out:
2268         return rc;
2269 }
2270
2271 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2272 {
2273         /* Very similar to cc_aead_encrypt() above. */
2274
2275         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2276         int rc;
2277
2278         /* Plaintext is not encrypted with RFC 4543 */
2279         areq_ctx->plaintext_authenticate_only = true;
2280
2281         /* No generated IV required */
2282         areq_ctx->backup_iv = req->iv;
2283         areq_ctx->backup_giv = NULL;
2284
2285         cc_proc_rfc4_gcm(req);
2286         areq_ctx->is_gcm4543 = true;
2287
2288         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2289         if (rc != -EINPROGRESS && rc != -EBUSY)
2290                 req->iv = areq_ctx->backup_iv;
2291
2292         return rc;
2293 }
2294
2295 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2296 {
2297         /* Very similar to cc_aead_decrypt() above. */
2298
2299         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2300         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2301         struct device *dev = drvdata_to_dev(ctx->drvdata);
2302         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2303         int rc = -EINVAL;
2304
2305         if (!valid_assoclen(req)) {
2306                 dev_err(dev, "invalid assoclen: %u\n", req->assoclen);
2307                 goto out;
2308         }
2309
2310         /* No generated IV required */
2311         areq_ctx->backup_iv = req->iv;
2312         areq_ctx->backup_giv = NULL;
2313
2314         areq_ctx->plaintext_authenticate_only = false;
2315
2316         cc_proc_rfc4_gcm(req);
2317         areq_ctx->is_gcm4543 = true;
2318
2319         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2320         if (rc != -EINPROGRESS && rc != -EBUSY)
2321                 req->iv = areq_ctx->backup_iv;
2322 out:
2323         return rc;
2324 }
2325
2326 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2327 {
2328         /* Very similar to cc_aead_decrypt() above. */
2329
2330         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2331         int rc;
2332
2333         /* Data is only authenticated, not decrypted, with RFC 4543 */
2334         areq_ctx->plaintext_authenticate_only = true;
2335
2336         /* No generated IV required */
2337         areq_ctx->backup_iv = req->iv;
2338         areq_ctx->backup_giv = NULL;
2339
2340         cc_proc_rfc4_gcm(req);
2341         areq_ctx->is_gcm4543 = true;
2342
2343         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2344         if (rc != -EINPROGRESS && rc != -EBUSY)
2345                 req->iv = areq_ctx->backup_iv;
2346
2347         return rc;
2348 }
2349
2350 /* AEAD algorithm templates */
2351 static struct cc_alg_template aead_algs[] = {
2352         {
2353                 .name = "authenc(hmac(sha1),cbc(aes))",
2354                 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2355                 .blocksize = AES_BLOCK_SIZE,
2356                 .template_aead = {
2357                         .setkey = cc_aead_setkey,
2358                         .setauthsize = cc_aead_setauthsize,
2359                         .encrypt = cc_aead_encrypt,
2360                         .decrypt = cc_aead_decrypt,
2361                         .init = cc_aead_init,
2362                         .exit = cc_aead_exit,
2363                         .ivsize = AES_BLOCK_SIZE,
2364                         .maxauthsize = SHA1_DIGEST_SIZE,
2365                 },
2366                 .cipher_mode = DRV_CIPHER_CBC,
2367                 .flow_mode = S_DIN_to_AES,
2368                 .auth_mode = DRV_HASH_SHA1,
2369                 .min_hw_rev = CC_HW_REV_630,
2370                 .std_body = CC_STD_NIST,
2371         },
2372         {
2373                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2374                 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2375                 .blocksize = DES3_EDE_BLOCK_SIZE,
2376                 .template_aead = {
2377                         .setkey = cc_aead_setkey,
2378                         .setauthsize = cc_aead_setauthsize,
2379                         .encrypt = cc_aead_encrypt,
2380                         .decrypt = cc_aead_decrypt,
2381                         .init = cc_aead_init,
2382                         .exit = cc_aead_exit,
2383                         .ivsize = DES3_EDE_BLOCK_SIZE,
2384                         .maxauthsize = SHA1_DIGEST_SIZE,
2385                 },
2386                 .cipher_mode = DRV_CIPHER_CBC,
2387                 .flow_mode = S_DIN_to_DES,
2388                 .auth_mode = DRV_HASH_SHA1,
2389                 .min_hw_rev = CC_HW_REV_630,
2390                 .std_body = CC_STD_NIST,
2391         },
2392         {
2393                 .name = "authenc(hmac(sha256),cbc(aes))",
2394                 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2395                 .blocksize = AES_BLOCK_SIZE,
2396                 .template_aead = {
2397                         .setkey = cc_aead_setkey,
2398                         .setauthsize = cc_aead_setauthsize,
2399                         .encrypt = cc_aead_encrypt,
2400                         .decrypt = cc_aead_decrypt,
2401                         .init = cc_aead_init,
2402                         .exit = cc_aead_exit,
2403                         .ivsize = AES_BLOCK_SIZE,
2404                         .maxauthsize = SHA256_DIGEST_SIZE,
2405                 },
2406                 .cipher_mode = DRV_CIPHER_CBC,
2407                 .flow_mode = S_DIN_to_AES,
2408                 .auth_mode = DRV_HASH_SHA256,
2409                 .min_hw_rev = CC_HW_REV_630,
2410                 .std_body = CC_STD_NIST,
2411         },
2412         {
2413                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2414                 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2415                 .blocksize = DES3_EDE_BLOCK_SIZE,
2416                 .template_aead = {
2417                         .setkey = cc_aead_setkey,
2418                         .setauthsize = cc_aead_setauthsize,
2419                         .encrypt = cc_aead_encrypt,
2420                         .decrypt = cc_aead_decrypt,
2421                         .init = cc_aead_init,
2422                         .exit = cc_aead_exit,
2423                         .ivsize = DES3_EDE_BLOCK_SIZE,
2424                         .maxauthsize = SHA256_DIGEST_SIZE,
2425                 },
2426                 .cipher_mode = DRV_CIPHER_CBC,
2427                 .flow_mode = S_DIN_to_DES,
2428                 .auth_mode = DRV_HASH_SHA256,
2429                 .min_hw_rev = CC_HW_REV_630,
2430                 .std_body = CC_STD_NIST,
2431         },
2432         {
2433                 .name = "authenc(xcbc(aes),cbc(aes))",
2434                 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2435                 .blocksize = AES_BLOCK_SIZE,
2436                 .template_aead = {
2437                         .setkey = cc_aead_setkey,
2438                         .setauthsize = cc_aead_setauthsize,
2439                         .encrypt = cc_aead_encrypt,
2440                         .decrypt = cc_aead_decrypt,
2441                         .init = cc_aead_init,
2442                         .exit = cc_aead_exit,
2443                         .ivsize = AES_BLOCK_SIZE,
2444                         .maxauthsize = AES_BLOCK_SIZE,
2445                 },
2446                 .cipher_mode = DRV_CIPHER_CBC,
2447                 .flow_mode = S_DIN_to_AES,
2448                 .auth_mode = DRV_HASH_XCBC_MAC,
2449                 .min_hw_rev = CC_HW_REV_630,
2450                 .std_body = CC_STD_NIST,
2451         },
2452         {
2453                 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2454                 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2455                 .blocksize = 1,
2456                 .template_aead = {
2457                         .setkey = cc_aead_setkey,
2458                         .setauthsize = cc_aead_setauthsize,
2459                         .encrypt = cc_aead_encrypt,
2460                         .decrypt = cc_aead_decrypt,
2461                         .init = cc_aead_init,
2462                         .exit = cc_aead_exit,
2463                         .ivsize = CTR_RFC3686_IV_SIZE,
2464                         .maxauthsize = SHA1_DIGEST_SIZE,
2465                 },
2466                 .cipher_mode = DRV_CIPHER_CTR,
2467                 .flow_mode = S_DIN_to_AES,
2468                 .auth_mode = DRV_HASH_SHA1,
2469                 .min_hw_rev = CC_HW_REV_630,
2470                 .std_body = CC_STD_NIST,
2471         },
2472         {
2473                 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2474                 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2475                 .blocksize = 1,
2476                 .template_aead = {
2477                         .setkey = cc_aead_setkey,
2478                         .setauthsize = cc_aead_setauthsize,
2479                         .encrypt = cc_aead_encrypt,
2480                         .decrypt = cc_aead_decrypt,
2481                         .init = cc_aead_init,
2482                         .exit = cc_aead_exit,
2483                         .ivsize = CTR_RFC3686_IV_SIZE,
2484                         .maxauthsize = SHA256_DIGEST_SIZE,
2485                 },
2486                 .cipher_mode = DRV_CIPHER_CTR,
2487                 .flow_mode = S_DIN_to_AES,
2488                 .auth_mode = DRV_HASH_SHA256,
2489                 .min_hw_rev = CC_HW_REV_630,
2490                 .std_body = CC_STD_NIST,
2491         },
2492         {
2493                 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2494                 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2495                 .blocksize = 1,
2496                 .template_aead = {
2497                         .setkey = cc_aead_setkey,
2498                         .setauthsize = cc_aead_setauthsize,
2499                         .encrypt = cc_aead_encrypt,
2500                         .decrypt = cc_aead_decrypt,
2501                         .init = cc_aead_init,
2502                         .exit = cc_aead_exit,
2503                         .ivsize = CTR_RFC3686_IV_SIZE,
2504                         .maxauthsize = AES_BLOCK_SIZE,
2505                 },
2506                 .cipher_mode = DRV_CIPHER_CTR,
2507                 .flow_mode = S_DIN_to_AES,
2508                 .auth_mode = DRV_HASH_XCBC_MAC,
2509                 .min_hw_rev = CC_HW_REV_630,
2510                 .std_body = CC_STD_NIST,
2511         },
2512         {
2513                 .name = "ccm(aes)",
2514                 .driver_name = "ccm-aes-ccree",
2515                 .blocksize = 1,
2516                 .template_aead = {
2517                         .setkey = cc_aead_setkey,
2518                         .setauthsize = cc_ccm_setauthsize,
2519                         .encrypt = cc_aead_encrypt,
2520                         .decrypt = cc_aead_decrypt,
2521                         .init = cc_aead_init,
2522                         .exit = cc_aead_exit,
2523                         .ivsize = AES_BLOCK_SIZE,
2524                         .maxauthsize = AES_BLOCK_SIZE,
2525                 },
2526                 .cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4309(ccm(aes))",
		.driver_name = "rfc4309-ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4309_ccm_setkey,
			.setauthsize = cc_rfc4309_ccm_setauthsize,
			.encrypt = cc_rfc4309_ccm_encrypt,
			.decrypt = cc_rfc4309_ccm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CCM_BLOCK_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_gcm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = 12, /* standard 96-bit GCM nonce */
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4106_gcm_setkey,
			.setauthsize = cc_rfc4106_gcm_setauthsize,
			.encrypt = cc_rfc4106_gcm_encrypt,
			.decrypt = cc_rfc4106_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		/* RFC 4543: GMAC, i.e. GCM with all data treated as AAD */
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4543_gcm_setkey,
			.setauthsize = cc_rfc4543_gcm_setauthsize,
			.encrypt = cc_rfc4543_gcm_encrypt,
			.decrypt = cc_rfc4543_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

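/*
 * Consumers reach the entries above through the generic kernel AEAD
 * API rather than through this file. A minimal sketch (illustrative
 * only; key/buffer setup is elided and the variable names are
 * hypothetical):
 *
 *	#include <crypto/aead.h>
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	err = crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 *
 * If "gcm-aes-ccree" is registered and wins on priority, the crypto
 * core resolves "gcm(aes)" to this driver's implementation.
 */
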
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	/* Snapshot the fully initialized template into the wrapper */
	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}

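/*
 * Note: cc_create_aead_alg() copies the template's aead_alg by value,
 * so the static aead_algs[] entries are never handed to the crypto API
 * directly; each registration owns a private copy whose lifetime is
 * tied to the returned cc_crypto_alg wrapper.
 */
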
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle = drvdata->aead_handle;

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}

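/*
 * cc_aead_free() is safe to call on a partially populated handle: only
 * algs that made it onto aead_list are unregistered, which is what the
 * error unwind in cc_aead_alloc() below relies on.
 */
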
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle) {
		rc = -ENOMEM;
		goto fail0;
	}

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail1;
	}

	/* Register supported algs with the Linux crypto API */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		/* Skip algs the HW revision or enabled standards can't back */
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}

		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata);
fail0:
	return rc;
}
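
/*
 * Illustrative only: a probe-style caller (names hypothetical) is
 * expected to pair the two entry points above, e.g.:
 *
 *	rc = cc_aead_alloc(drvdata);
 *	if (rc)
 *		goto cleanup;		// unwind earlier allocations
 *	...
 *	cc_aead_free(drvdata);		// on the remove path
 */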