1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <crypto/algapi.h>
7 #include <crypto/internal/aead.h>
8 #include <crypto/authenc.h>
9 #include <crypto/des.h>
10 #include <linux/rtnetlink.h>
11 #include "cc_driver.h"
12 #include "cc_buffer_mgr.h"
14 #include "cc_request_mgr.h"
16 #include "cc_sram_mgr.h"
18 #define template_aead template_u.aead
20 #define MAX_AEAD_SETKEY_SEQ 12
21 #define MAX_AEAD_PROCESS_SEQ 23
23 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
24 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
26 #define AES_CCM_RFC4309_NONCE_SIZE 3
27 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
29 /* Value of each ICV_CMP byte (of 8) in case of success */
30 #define ICV_VERIF_OK 0x01
32 struct cc_aead_handle {
33 cc_sram_addr_t sram_workspace_addr;
34 struct list_head aead_list;
35 };
36
37 struct cc_hmac_s {
38 u8 *padded_authkey;
39 u8 *ipad_opad; /* IPAD, OPAD */
40 dma_addr_t padded_authkey_dma_addr;
41 dma_addr_t ipad_opad_dma_addr;
42 };
43
44 struct cc_xcbc_s {
45 u8 *xcbc_keys; /* K1, K2, K3 */
46 dma_addr_t xcbc_keys_dma_addr;
47 };
48
49 struct cc_aead_ctx {
50 struct cc_drvdata *drvdata;
51 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for CTR (RFC 3686) IV and AES-CCM */
52 u8 *enckey;
53 dma_addr_t enckey_dma_addr;
54 union {
55 struct cc_hmac_s hmac;
56 struct cc_xcbc_s xcbc;
57 } auth_state;
58 unsigned int enc_keylen;
59 unsigned int auth_keylen;
60 unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
61 unsigned int hash_len;
62 enum drv_cipher_mode cipher_mode;
63 enum cc_flow_mode flow_mode;
64 enum drv_hash_mode auth_mode;
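/* Editor's note: the 16/20-byte check below mirrors the generic
 * rfc4106/rfc4309 templates (crypto/gcm.c, crypto/ccm.c). In IPsec ESP the
 * AAD is the 8-byte SPI+seqno or the 12-byte SPI+ESN, followed by the
 * 8-byte per-packet IV, giving 16 or 20 bytes of associated data.
 */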
67 static inline bool valid_assoclen(struct aead_request *req)
69 return ((req->assoclen == 16) || (req->assoclen == 20));
72 static void cc_aead_exit(struct crypto_aead *tfm)
74 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
75 struct device *dev = drvdata_to_dev(ctx->drvdata);
77 dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
78 crypto_tfm_alg_name(&tfm->base));
80 /* Unmap enckey buffer */
82 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
83 ctx->enckey_dma_addr);
84 dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
85 &ctx->enckey_dma_addr);
86 ctx->enckey_dma_addr = 0;
90 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
91 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
93 if (xcbc->xcbc_keys) {
94 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
95 xcbc->xcbc_keys,
96 xcbc->xcbc_keys_dma_addr);
98 dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
99 &xcbc->xcbc_keys_dma_addr);
100 xcbc->xcbc_keys_dma_addr = 0;
101 xcbc->xcbc_keys = NULL;
102 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
103 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
105 if (hmac->ipad_opad) {
106 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
107 hmac->ipad_opad,
108 hmac->ipad_opad_dma_addr);
109 dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
110 &hmac->ipad_opad_dma_addr);
111 hmac->ipad_opad_dma_addr = 0;
112 hmac->ipad_opad = NULL;
114 if (hmac->padded_authkey) {
115 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
116 hmac->padded_authkey,
117 hmac->padded_authkey_dma_addr);
118 dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
119 &hmac->padded_authkey_dma_addr);
120 hmac->padded_authkey_dma_addr = 0;
121 hmac->padded_authkey = NULL;
126 static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
128 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
130 return cc_get_default_hash_len(ctx->drvdata);
133 static int cc_aead_init(struct crypto_aead *tfm)
135 struct aead_alg *alg = crypto_aead_alg(tfm);
136 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
137 struct cc_crypto_alg *cc_alg =
138 container_of(alg, struct cc_crypto_alg, aead_alg);
139 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
141 dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
142 crypto_tfm_alg_name(&tfm->base));
144 /* Initialize modes in instance */
145 ctx->cipher_mode = cc_alg->cipher_mode;
146 ctx->flow_mode = cc_alg->flow_mode;
147 ctx->auth_mode = cc_alg->auth_mode;
148 ctx->drvdata = cc_alg->drvdata;
149 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
151 /* Allocate key buffer, cache line aligned */
152 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
153 &ctx->enckey_dma_addr, GFP_KERNEL);
154 if (!ctx->enckey) {
155 dev_err(dev, "Failed allocating key buffer\n");
158 dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
159 ctx->enckey);
161 /* Set default authlen value */
163 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
164 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
165 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
167 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
168 /* (also used temporarily for the user key - up to 256 bit) */
169 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
170 &xcbc->xcbc_keys_dma_addr,
171 GFP_KERNEL);
172 if (!xcbc->xcbc_keys) {
173 dev_err(dev, "Failed allocating buffer for XCBC keys\n");
176 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
177 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
178 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
179 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
181 /* Allocate dma-coherent buffer for IPAD + OPAD */
182 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
183 &hmac->ipad_opad_dma_addr,
184 GFP_KERNEL);
186 if (!hmac->ipad_opad) {
187 dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
191 dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
192 hmac->ipad_opad);
194 hmac->padded_authkey = dma_alloc_coherent(dev,
195 MAX_HMAC_BLOCK_SIZE,
196 pkey_dma,
197 GFP_KERNEL);
199 if (!hmac->padded_authkey) {
200 dev_err(dev, "failed to allocate padded_authkey\n");
204 ctx->auth_state.hmac.ipad_opad = NULL;
205 ctx->auth_state.hmac.padded_authkey = NULL;
207 ctx->hash_len = cc_get_aead_hash_len(tfm);
216 static void cc_aead_complete(struct device *dev, void *cc_req, int err)
218 struct aead_request *areq = (struct aead_request *)cc_req;
219 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
220 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
221 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
223 cc_unmap_aead_request(dev, areq);
225 /* Restore ordinary iv pointer */
226 areq->iv = areq_ctx->backup_iv;
231 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
232 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
233 ctx->authsize) != 0) {
234 dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
235 ctx->authsize, ctx->cipher_mode);
236 /* In case of payload authentication failure, the decrypted
237 * message must not be revealed --> zero its memory.
238 */
239 cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
243 if (areq_ctx->is_icv_fragmented) {
244 u32 skip = areq->cryptlen + areq_ctx->dst_offset;
246 cc_copy_sg_portion(dev, areq_ctx->mac_buf,
247 areq_ctx->dst_sgl, skip,
248 (skip + ctx->authsize),
252 /* If an IV was generated, copy it back to the user-provided
253 * buffer.
254 */
255 if (areq_ctx->backup_giv) {
256 if (ctx->cipher_mode == DRV_CIPHER_CTR)
257 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
258 CTR_RFC3686_NONCE_SIZE,
259 CTR_RFC3686_IV_SIZE);
260 else if (ctx->cipher_mode == DRV_CIPHER_CCM)
261 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
262 CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
266 aead_request_complete(areq, err);
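/* xcbc_setkey() below derives the three AES-XCBC-MAC subkeys of RFC 3566:
 * K1 = AES-K(16 bytes of 0x01), K2 = AES-K(16 bytes of 0x02),
 * K3 = AES-K(16 bytes of 0x03). The user key is loaded once and the three
 * encrypted constant blocks are written back over the same DMA buffer.
 */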
269 static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
270 struct cc_aead_ctx *ctx)
272 /* Load the AES key */
273 hw_desc_init(&desc[0]);
274 /* We use the same buffer for the source/user key as for the
275 * output keys, because the user key is not needed anymore
276 * after this key-load operation.
277 */
278 set_din_type(&desc[0], DMA_DLLI,
279 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
281 set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
282 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
283 set_key_size_aes(&desc[0], ctx->auth_keylen);
284 set_flow_mode(&desc[0], S_DIN_to_AES);
285 set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
287 hw_desc_init(&desc[1]);
288 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
289 set_flow_mode(&desc[1], DIN_AES_DOUT);
290 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
291 AES_KEYSIZE_128, NS_BIT, 0);
293 hw_desc_init(&desc[2]);
294 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
295 set_flow_mode(&desc[2], DIN_AES_DOUT);
296 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
298 AES_KEYSIZE_128, NS_BIT, 0);
300 hw_desc_init(&desc[3]);
301 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
302 set_flow_mode(&desc[3], DIN_AES_DOUT);
303 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
304 + 2 * AES_KEYSIZE_128),
305 AES_KEYSIZE_128, NS_BIT, 0);
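/* hmac_setkey() below precomputes the HMAC inner/outer pad states of
 * RFC 2104: H(K' ^ ipad) and H(K' ^ opad), with ipad = 0x36 and
 * opad = 0x5c repeated over one hash block. Both intermediate digests are
 * stored back-to-back in the ipad_opad buffer so each request can resume
 * the hash from the saved state instead of rehashing the key.
 */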
310 static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
312 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
313 unsigned int digest_ofs = 0;
314 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
315 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
316 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
317 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
318 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
320 unsigned int idx = 0;
323 /* calc derived HMAC key */
324 for (i = 0; i < 2; i++) {
325 /* Load hash initial state */
326 hw_desc_init(&desc[idx]);
327 set_cipher_mode(&desc[idx], hash_mode);
328 set_din_sram(&desc[idx],
329 cc_larval_digest_addr(ctx->drvdata,
332 set_flow_mode(&desc[idx], S_DIN_to_HASH);
333 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
336 /* Load the hash current length */
337 hw_desc_init(&desc[idx]);
338 set_cipher_mode(&desc[idx], hash_mode);
339 set_din_const(&desc[idx], 0, ctx->hash_len);
340 set_flow_mode(&desc[idx], S_DIN_to_HASH);
341 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
344 /* Prepare ipad key */
345 hw_desc_init(&desc[idx]);
346 set_xor_val(&desc[idx], hmac_pad_const[i]);
347 set_cipher_mode(&desc[idx], hash_mode);
348 set_flow_mode(&desc[idx], S_DIN_to_HASH);
349 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
352 /* Perform HASH update */
353 hw_desc_init(&desc[idx]);
354 set_din_type(&desc[idx], DMA_DLLI,
355 hmac->padded_authkey_dma_addr,
356 SHA256_BLOCK_SIZE, NS_BIT);
357 set_cipher_mode(&desc[idx], hash_mode);
358 set_xor_active(&desc[idx]);
359 set_flow_mode(&desc[idx], DIN_HASH);
363 hw_desc_init(&desc[idx]);
364 set_cipher_mode(&desc[idx], hash_mode);
365 set_dout_dlli(&desc[idx],
366 (hmac->ipad_opad_dma_addr + digest_ofs),
367 digest_size, NS_BIT, 0);
368 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
369 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
370 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
373 digest_ofs += digest_size;
379 static int validate_keys_sizes(struct cc_aead_ctx *ctx)
381 struct device *dev = drvdata_to_dev(ctx->drvdata);
383 dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
384 ctx->enc_keylen, ctx->auth_keylen);
386 switch (ctx->auth_mode) {
388 case DRV_HASH_SHA256:
390 case DRV_HASH_XCBC_MAC:
391 if (ctx->auth_keylen != AES_KEYSIZE_128 &&
392 ctx->auth_keylen != AES_KEYSIZE_192 &&
393 ctx->auth_keylen != AES_KEYSIZE_256)
396 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
397 if (ctx->auth_keylen > 0)
401 dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
404 /* Check cipher key size */
405 if (ctx->flow_mode == S_DIN_to_DES) {
406 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
407 dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
411 } else { /* Default assumed to be AES ciphers */
412 if (ctx->enc_keylen != AES_KEYSIZE_128 &&
413 ctx->enc_keylen != AES_KEYSIZE_192 &&
414 ctx->enc_keylen != AES_KEYSIZE_256) {
415 dev_err(dev, "Invalid cipher(AES) key size: %u\n",
421 return 0; /* All tests of keys sizes passed */
424 /* This function prepares the user key so it can be passed to the HMAC
425 * processing (copy to internal buffer, or hash when longer than a block).
426 */
427 static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
430 dma_addr_t key_dma_addr = 0;
431 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
432 struct device *dev = drvdata_to_dev(ctx->drvdata);
433 u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
434 struct cc_crypto_req cc_req = {};
435 unsigned int blocksize;
436 unsigned int digestsize;
437 unsigned int hashmode;
438 unsigned int idx = 0;
440 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
441 dma_addr_t padded_authkey_dma_addr =
442 ctx->auth_state.hmac.padded_authkey_dma_addr;
444 switch (ctx->auth_mode) { /* auth_key required and >0 */
446 blocksize = SHA1_BLOCK_SIZE;
447 digestsize = SHA1_DIGEST_SIZE;
448 hashmode = DRV_HASH_HW_SHA1;
450 case DRV_HASH_SHA256:
452 blocksize = SHA256_BLOCK_SIZE;
453 digestsize = SHA256_DIGEST_SIZE;
454 hashmode = DRV_HASH_HW_SHA256;
458 key_dma_addr = dma_map_single(dev, (void *)key, keylen,
459 DMA_TO_DEVICE);
460 if (dma_mapping_error(dev, key_dma_addr)) {
461 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
465 if (keylen > blocksize) {
466 /* Load hash initial state */
467 hw_desc_init(&desc[idx]);
468 set_cipher_mode(&desc[idx], hashmode);
469 set_din_sram(&desc[idx], larval_addr, digestsize);
470 set_flow_mode(&desc[idx], S_DIN_to_HASH);
471 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
474 /* Load the hash current length */
475 hw_desc_init(&desc[idx]);
476 set_cipher_mode(&desc[idx], hashmode);
477 set_din_const(&desc[idx], 0, ctx->hash_len);
478 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
479 set_flow_mode(&desc[idx], S_DIN_to_HASH);
480 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
483 hw_desc_init(&desc[idx]);
484 set_din_type(&desc[idx], DMA_DLLI,
485 key_dma_addr, keylen, NS_BIT);
486 set_flow_mode(&desc[idx], DIN_HASH);
490 hw_desc_init(&desc[idx]);
491 set_cipher_mode(&desc[idx], hashmode);
492 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
493 digestsize, NS_BIT, 0);
494 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
495 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
496 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
497 set_cipher_config0(&desc[idx],
498 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
501 hw_desc_init(&desc[idx]);
502 set_din_const(&desc[idx], 0, (blocksize - digestsize));
503 set_flow_mode(&desc[idx], BYPASS);
504 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
505 digestsize), (blocksize - digestsize),
509 hw_desc_init(&desc[idx]);
510 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
512 set_flow_mode(&desc[idx], BYPASS);
513 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
517 if ((blocksize - keylen) != 0) {
518 hw_desc_init(&desc[idx]);
519 set_din_const(&desc[idx], 0,
520 (blocksize - keylen));
521 set_flow_mode(&desc[idx], BYPASS);
522 set_dout_dlli(&desc[idx],
523 (padded_authkey_dma_addr +
525 (blocksize - keylen), NS_BIT, 0);
530 hw_desc_init(&desc[idx]);
531 set_din_const(&desc[idx], 0, (blocksize - keylen));
532 set_flow_mode(&desc[idx], BYPASS);
533 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
534 blocksize, NS_BIT, 0);
538 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
540 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
543 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
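/* cc_aead_setkey() below parses the authenc() key blob: an rtattr header
 * carrying a crypto_authenc_key_param with the big-endian enckeylen,
 * followed by the authentication key and then the encryption key (see the
 * matching parser in crypto/authenc.c).
 */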
548 static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
552 struct rtattr *rta = (struct rtattr *)key;
553 struct cc_crypto_req cc_req = {};
554 struct crypto_authenc_key_param *param;
555 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
557 unsigned int seq_len = 0;
558 struct device *dev = drvdata_to_dev(ctx->drvdata);
560 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
561 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
563 /* STAT_PHASE_0: Init and sanity checks */
565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
566 if (!RTA_OK(rta, keylen))
568 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
570 if (RTA_PAYLOAD(rta) < sizeof(*param))
572 param = RTA_DATA(rta);
573 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
574 key += RTA_ALIGN(rta->rta_len);
575 keylen -= RTA_ALIGN(rta->rta_len);
576 if (keylen < ctx->enc_keylen)
578 ctx->auth_keylen = keylen - ctx->enc_keylen;
580 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
581 /* the nonce is stored in the last bytes of the key */
582 if (ctx->enc_keylen <
583 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
585 /* Copy nonce from last 4 bytes in CTR key to
586 * first 4 bytes in CTR IV
588 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
589 ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
590 CTR_RFC3686_NONCE_SIZE);
591 /* Set CTR key size */
592 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
594 } else { /* non-authenc - has just one key */
595 ctx->enc_keylen = keylen;
596 ctx->auth_keylen = 0;
599 rc = validate_keys_sizes(ctx);
603 /* STAT_PHASE_1: Copy key to ctx */
605 /* Get key material */
606 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
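/* Editor's note: for a 24-byte key the AES engine is still handed
 * CC_AES_KEY_SIZE_MAX bytes (see cc_set_cipher_desc()), so the stale
 * tail of the key buffer must be zeroed.
 */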
607 if (ctx->enc_keylen == 24)
608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
610 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
611 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
612 rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
617 /* STAT_PHASE_2: Create sequence */
619 switch (ctx->auth_mode) {
621 case DRV_HASH_SHA256:
622 seq_len = hmac_setkey(desc, ctx);
624 case DRV_HASH_XCBC_MAC:
625 seq_len = xcbc_setkey(desc, ctx);
627 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
628 break; /* No auth. key setup */
630 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
635 /* STAT_PHASE_3: Submit sequence to HW */
637 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
638 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
640 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
645 /* Update STAT_PHASE_3 */
649 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
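/* RFC 4309 (AES-CCM for ESP) appends a 3-byte salt to the AES key; the
 * setkey below strips it into ctx->ctr_nonce, where it later becomes part
 * of the CCM nonce in cc_proc_rfc4309_ccm().
 */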
655 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
658 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
659
660 if (keylen < 3)
661 return -EINVAL;
662
663 keylen -= 3;
664 memcpy(ctx->ctr_nonce, key + keylen, 3);
666 return cc_aead_setkey(tfm, key, keylen);
669 static int cc_aead_setauthsize(struct crypto_aead *authenc,
670 unsigned int authsize)
672 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
673 struct device *dev = drvdata_to_dev(ctx->drvdata);
675 /* Unsupported auth. sizes */
676 if (authsize == 0 ||
677 authsize > crypto_aead_maxauthsize(authenc)) {
681 ctx->authsize = authsize;
682 dev_dbg(dev, "authlen=%d\n", ctx->authsize);
687 static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
688 unsigned int authsize)
689 {
690 switch (authsize) {
691 case 8:
692 case 12:
693 case 16:
694 break;
695 default:
696 return -EINVAL;
697 }
698
699 return cc_aead_setauthsize(authenc, authsize);
702 static int cc_ccm_setauthsize(struct crypto_aead *authenc,
703 unsigned int authsize)
704 {
705 switch (authsize) {
706 case 4:
707 case 6:
708 case 8:
709 case 10:
710 case 12:
711 case 14:
712 case 16:
713 break;
714 default:
715 return -EINVAL;
716 }
717
718 return cc_aead_setauthsize(authenc, authsize);
721 static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
722 struct cc_hw_desc desc[], unsigned int *seq_size)
724 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
725 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
726 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
727 enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
728 unsigned int idx = *seq_size;
729 struct device *dev = drvdata_to_dev(ctx->drvdata);
731 switch (assoc_dma_type) {
732 case CC_DMA_BUF_DLLI:
733 dev_dbg(dev, "ASSOC buffer type DLLI\n");
734 hw_desc_init(&desc[idx]);
735 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
736 areq->assoclen, NS_BIT);
737 set_flow_mode(&desc[idx], flow_mode);
738 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
739 areq_ctx->cryptlen > 0)
740 set_din_not_last_indication(&desc[idx]);
742 case CC_DMA_BUF_MLLI:
743 dev_dbg(dev, "ASSOC buffer type MLLI\n");
744 hw_desc_init(&desc[idx]);
745 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
746 areq_ctx->assoc.mlli_nents, NS_BIT);
747 set_flow_mode(&desc[idx], flow_mode);
748 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
749 areq_ctx->cryptlen > 0)
750 set_din_not_last_indication(&desc[idx]);
752 case CC_DMA_BUF_NULL:
754 dev_err(dev, "Invalid ASSOC buffer type\n");
760 static void cc_proc_authen_desc(struct aead_request *areq,
761 unsigned int flow_mode,
762 struct cc_hw_desc desc[],
763 unsigned int *seq_size, int direct)
765 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
766 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
767 unsigned int idx = *seq_size;
768 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
769 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
770 struct device *dev = drvdata_to_dev(ctx->drvdata);
772 switch (data_dma_type) {
773 case CC_DMA_BUF_DLLI:
775 struct scatterlist *cipher =
776 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
777 areq_ctx->dst_sgl : areq_ctx->src_sgl;
779 unsigned int offset =
780 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
781 areq_ctx->dst_offset : areq_ctx->src_offset;
782 dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
783 hw_desc_init(&desc[idx]);
784 set_din_type(&desc[idx], DMA_DLLI,
785 (sg_dma_address(cipher) + offset),
786 areq_ctx->cryptlen, NS_BIT);
787 set_flow_mode(&desc[idx], flow_mode);
790 case CC_DMA_BUF_MLLI:
792 /* DOUBLE-PASS flow (the default):
793 * assoc. + iv + data are compacted into one table.
794 * If assoclen is ZERO, only the IV is processed.
795 */
796 cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
797 u32 mlli_nents = areq_ctx->assoc.mlli_nents;
799 if (areq_ctx->is_single_pass) {
800 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
801 mlli_addr = areq_ctx->dst.sram_addr;
802 mlli_nents = areq_ctx->dst.mlli_nents;
804 mlli_addr = areq_ctx->src.sram_addr;
805 mlli_nents = areq_ctx->src.mlli_nents;
809 dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
810 hw_desc_init(&desc[idx]);
811 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
813 set_flow_mode(&desc[idx], flow_mode);
816 case CC_DMA_BUF_NULL:
818 dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
824 static void cc_proc_cipher_desc(struct aead_request *areq,
825 unsigned int flow_mode,
826 struct cc_hw_desc desc[],
827 unsigned int *seq_size)
829 unsigned int idx = *seq_size;
830 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
831 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
832 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
833 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
834 struct device *dev = drvdata_to_dev(ctx->drvdata);
836 if (areq_ctx->cryptlen == 0)
837 return; /*null processing*/
839 switch (data_dma_type) {
840 case CC_DMA_BUF_DLLI:
841 dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
842 hw_desc_init(&desc[idx]);
843 set_din_type(&desc[idx], DMA_DLLI,
844 (sg_dma_address(areq_ctx->src_sgl) +
845 areq_ctx->src_offset), areq_ctx->cryptlen,
847 set_dout_dlli(&desc[idx],
848 (sg_dma_address(areq_ctx->dst_sgl) +
849 areq_ctx->dst_offset),
850 areq_ctx->cryptlen, NS_BIT, 0);
851 set_flow_mode(&desc[idx], flow_mode);
853 case CC_DMA_BUF_MLLI:
854 dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
855 hw_desc_init(&desc[idx]);
856 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
857 areq_ctx->src.mlli_nents, NS_BIT);
858 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
859 areq_ctx->dst.mlli_nents, NS_BIT, 0);
860 set_flow_mode(&desc[idx], flow_mode);
862 case CC_DMA_BUF_NULL:
864 dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
870 static void cc_proc_digest_desc(struct aead_request *req,
871 struct cc_hw_desc desc[],
872 unsigned int *seq_size)
874 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
875 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
876 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
877 unsigned int idx = *seq_size;
878 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
879 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
880 int direct = req_ctx->gen_ctx.op_type;
882 /* Get final ICV result */
883 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
884 hw_desc_init(&desc[idx]);
885 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
886 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
887 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
889 set_queue_last_ind(ctx->drvdata, &desc[idx]);
890 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
891 set_aes_not_hash_mode(&desc[idx]);
892 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
894 set_cipher_config0(&desc[idx],
895 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
896 set_cipher_mode(&desc[idx], hash_mode);
899 /* Get ICV out from hardware */
900 hw_desc_init(&desc[idx]);
901 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
902 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
903 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
904 ctx->authsize, NS_BIT, 1);
905 set_queue_last_ind(ctx->drvdata, &desc[idx]);
906 set_cipher_config0(&desc[idx],
907 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
908 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
909 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
910 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
911 set_aes_not_hash_mode(&desc[idx]);
913 set_cipher_mode(&desc[idx], hash_mode);
920 static void cc_set_cipher_desc(struct aead_request *req,
921 struct cc_hw_desc desc[],
922 unsigned int *seq_size)
924 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
925 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
926 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
927 unsigned int hw_iv_size = req_ctx->hw_iv_size;
928 unsigned int idx = *seq_size;
929 int direct = req_ctx->gen_ctx.op_type;
931 /* Setup cipher state */
932 hw_desc_init(&desc[idx]);
933 set_cipher_config0(&desc[idx], direct);
934 set_flow_mode(&desc[idx], ctx->flow_mode);
935 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
937 if (ctx->cipher_mode == DRV_CIPHER_CTR)
938 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
940 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
941 set_cipher_mode(&desc[idx], ctx->cipher_mode);
945 hw_desc_init(&desc[idx]);
946 set_cipher_config0(&desc[idx], direct);
947 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
948 set_flow_mode(&desc[idx], ctx->flow_mode);
949 if (ctx->flow_mode == S_DIN_to_AES) {
950 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
951 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
952 ctx->enc_keylen), NS_BIT);
953 set_key_size_aes(&desc[idx], ctx->enc_keylen);
955 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
956 ctx->enc_keylen, NS_BIT);
957 set_key_size_des(&desc[idx], ctx->enc_keylen);
959 set_cipher_mode(&desc[idx], ctx->cipher_mode);
965 static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
966 unsigned int *seq_size, unsigned int data_flow_mode)
968 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
969 int direct = req_ctx->gen_ctx.op_type;
970 unsigned int idx = *seq_size;
972 if (req_ctx->cryptlen == 0)
973 return; /*null processing*/
975 cc_set_cipher_desc(req, desc, &idx);
976 cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
977 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
978 /* We must wait for the DMA to finish writing all the ciphertext */
979 hw_desc_init(&desc[idx]);
980 set_din_no_dma(&desc[idx], 0, 0xfffff0);
981 set_dout_no_dma(&desc[idx], 0, 0, 1);
988 static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
989 unsigned int *seq_size)
991 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
992 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
993 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
994 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
995 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
996 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
997 unsigned int idx = *seq_size;
999 /* Loading hash ipad xor key state */
1000 hw_desc_init(&desc[idx]);
1001 set_cipher_mode(&desc[idx], hash_mode);
1002 set_din_type(&desc[idx], DMA_DLLI,
1003 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1005 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1009 /* Load init. digest len (64 bytes) */
1010 hw_desc_init(&desc[idx]);
1011 set_cipher_mode(&desc[idx], hash_mode);
1012 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1014 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1015 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1021 static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1022 unsigned int *seq_size)
1024 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1025 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1026 unsigned int idx = *seq_size;
1028 /* Loading MAC state */
1029 hw_desc_init(&desc[idx]);
1030 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1031 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1032 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1033 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1034 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1035 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1036 set_aes_not_hash_mode(&desc[idx]);
1039 /* Setup XCBC MAC K1 */
1040 hw_desc_init(&desc[idx]);
1041 set_din_type(&desc[idx], DMA_DLLI,
1042 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1043 AES_KEYSIZE_128, NS_BIT);
1044 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1045 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1046 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1047 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1048 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1049 set_aes_not_hash_mode(&desc[idx]);
1052 /* Setup XCBC MAC K2 */
1053 hw_desc_init(&desc[idx]);
1054 set_din_type(&desc[idx], DMA_DLLI,
1055 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1056 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1057 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1058 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1059 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1060 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1061 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1062 set_aes_not_hash_mode(&desc[idx]);
1065 /* Setup XCBC MAC K3 */
1066 hw_desc_init(&desc[idx]);
1067 set_din_type(&desc[idx], DMA_DLLI,
1068 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1069 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1070 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1071 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1072 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1073 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1074 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1075 set_aes_not_hash_mode(&desc[idx]);
1081 static void cc_proc_header_desc(struct aead_request *req,
1082 struct cc_hw_desc desc[],
1083 unsigned int *seq_size)
1085 unsigned int idx = *seq_size;
1086 /* Hash associated data */
1087 if (req->assoclen > 0)
1088 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1094 static void cc_proc_scheme_desc(struct aead_request *req,
1095 struct cc_hw_desc desc[],
1096 unsigned int *seq_size)
1098 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1099 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1100 struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1101 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1102 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1103 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1104 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1105 unsigned int idx = *seq_size;
1107 hw_desc_init(&desc[idx]);
1108 set_cipher_mode(&desc[idx], hash_mode);
1109 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1111 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1112 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1113 set_cipher_do(&desc[idx], DO_PAD);
1116 /* Get final ICV result */
1117 hw_desc_init(&desc[idx]);
1118 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1120 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1121 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1122 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1123 set_cipher_mode(&desc[idx], hash_mode);
1126 /* Loading hash opad xor key state */
1127 hw_desc_init(&desc[idx]);
1128 set_cipher_mode(&desc[idx], hash_mode);
1129 set_din_type(&desc[idx], DMA_DLLI,
1130 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1131 digest_size, NS_BIT);
1132 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1133 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1136 /* Load init. digest len (64 bytes) */
1137 hw_desc_init(&desc[idx]);
1138 set_cipher_mode(&desc[idx], hash_mode);
1139 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1141 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1142 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1143 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1146 /* Perform HASH update */
1147 hw_desc_init(&desc[idx]);
1148 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1150 set_flow_mode(&desc[idx], DIN_HASH);
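/* Editor's note: an MLLI table describes a scattered buffer to the DMA
 * engine. cc_mlli_to_sram() below copies the table from host memory into
 * SRAM with a BYPASS descriptor so the following data descriptors can
 * reference it by SRAM address.
 */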
1156 static void cc_mlli_to_sram(struct aead_request *req,
1157 struct cc_hw_desc desc[], unsigned int *seq_size)
1159 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1160 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1162 struct device *dev = drvdata_to_dev(ctx->drvdata);
1164 if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1165 req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1166 !req_ctx->is_single_pass) {
1167 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1168 (unsigned int)ctx->drvdata->mlli_sram_addr,
1169 req_ctx->mlli_params.mlli_len);
1170 /* Copy MLLI table host-to-sram */
1171 hw_desc_init(&desc[*seq_size]);
1172 set_din_type(&desc[*seq_size], DMA_DLLI,
1173 req_ctx->mlli_params.mlli_dma_addr,
1174 req_ctx->mlli_params.mlli_len, NS_BIT);
1175 set_dout_sram(&desc[*seq_size],
1176 ctx->drvdata->mlli_sram_addr,
1177 req_ctx->mlli_params.mlli_len);
1178 set_flow_mode(&desc[*seq_size], BYPASS);
1183 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1184 enum cc_flow_mode setup_flow_mode,
1185 bool is_single_pass)
1187 enum cc_flow_mode data_flow_mode;
1189 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1190 if (setup_flow_mode == S_DIN_to_AES)
1191 data_flow_mode = is_single_pass ?
1192 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1194 data_flow_mode = is_single_pass ?
1195 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1196 } else { /* Decrypt */
1197 if (setup_flow_mode == S_DIN_to_AES)
1198 data_flow_mode = is_single_pass ?
1199 AES_and_HASH : DIN_AES_DOUT;
1201 data_flow_mode = is_single_pass ?
1202 DES_and_HASH : DIN_DES_DOUT;
1205 return data_flow_mode;
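/* Flow recap (editor's note): authenc() is encrypt-then-MAC, so the MAC is
 * always computed over the ciphertext. Single-pass therefore uses
 * AES_to_HASH_and_DOUT on encrypt and AES_and_HASH on decrypt, feeding the
 * hash engine in the same sweep; double-pass (DIN_AES_DOUT) runs the
 * cipher and the authentication as two separate passes over the data.
 */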
1208 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1209 unsigned int *seq_size)
1211 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1212 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1213 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1214 int direct = req_ctx->gen_ctx.op_type;
1215 unsigned int data_flow_mode =
1216 cc_get_data_flow(direct, ctx->flow_mode,
1217 req_ctx->is_single_pass);
1219 if (req_ctx->is_single_pass) {
1223 cc_set_hmac_desc(req, desc, seq_size);
1224 cc_set_cipher_desc(req, desc, seq_size);
1225 cc_proc_header_desc(req, desc, seq_size);
1226 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1227 cc_proc_scheme_desc(req, desc, seq_size);
1228 cc_proc_digest_desc(req, desc, seq_size);
1234 * Fallback for unsupported single-pass modes,
1235 * e.g. when the assoc. data length is not a multiple of a word.
1236 */
1237 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1238 /* encrypt first.. */
1239 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1240 /* authenc after..*/
1241 cc_set_hmac_desc(req, desc, seq_size);
1242 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1243 cc_proc_scheme_desc(req, desc, seq_size);
1244 cc_proc_digest_desc(req, desc, seq_size);
1246 } else { /*DECRYPT*/
1247 /* authenc first..*/
1248 cc_set_hmac_desc(req, desc, seq_size);
1249 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1250 cc_proc_scheme_desc(req, desc, seq_size);
1251 /* decrypt after.. */
1252 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1253 /* Reading the digest result (which sets the completion bit)
1254 * must come after the cipher operation.
1255 */
1256 cc_proc_digest_desc(req, desc, seq_size);
1261 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1262 unsigned int *seq_size)
1264 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1265 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1266 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1267 int direct = req_ctx->gen_ctx.op_type;
1268 unsigned int data_flow_mode =
1269 cc_get_data_flow(direct, ctx->flow_mode,
1270 req_ctx->is_single_pass);
1272 if (req_ctx->is_single_pass) {
1276 cc_set_xcbc_desc(req, desc, seq_size);
1277 cc_set_cipher_desc(req, desc, seq_size);
1278 cc_proc_header_desc(req, desc, seq_size);
1279 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1280 cc_proc_digest_desc(req, desc, seq_size);
1286 * Fallback for unsupported single-pass modes,
1287 * e.g. when the assoc. data length is not a multiple of a word.
1288 */
1289 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1290 /* encrypt first.. */
1291 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1292 /* authenc after.. */
1293 cc_set_xcbc_desc(req, desc, seq_size);
1294 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1295 cc_proc_digest_desc(req, desc, seq_size);
1296 } else { /*DECRYPT*/
1297 /* authenc first.. */
1298 cc_set_xcbc_desc(req, desc, seq_size);
1299 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1300 /* decrypt after..*/
1301 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1302 /* Reading the digest result (which sets the completion bit)
1303 * must come after the cipher operation.
1304 */
1305 cc_proc_digest_desc(req, desc, seq_size);
1309 static int validate_data_size(struct cc_aead_ctx *ctx,
1310 enum drv_crypto_direction direct,
1311 struct aead_request *req)
1313 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1314 struct device *dev = drvdata_to_dev(ctx->drvdata);
1315 unsigned int assoclen = req->assoclen;
1316 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1317 (req->cryptlen - ctx->authsize) : req->cryptlen;
1319 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1320 req->cryptlen < ctx->authsize)
1323 areq_ctx->is_single_pass = true; /* default to the fast (single-pass) flow */
1325 switch (ctx->flow_mode) {
1327 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1328 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1330 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1332 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1333 if (areq_ctx->plaintext_authenticate_only)
1334 areq_ctx->is_single_pass = false;
1338 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1339 areq_ctx->is_single_pass = false;
1341 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1342 !IS_ALIGNED(cipherlen, sizeof(u32)))
1343 areq_ctx->is_single_pass = false;
1347 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1349 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1350 areq_ctx->is_single_pass = false;
1353 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1363 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1365 unsigned int len = 0;
1367 if (header_size == 0)
1370 if (header_size < ((1UL << 16) - (1UL << 8))) {
1371 len = 2;
1372
1373 pa0_buff[0] = (header_size >> 8) & 0xFF;
1374 pa0_buff[1] = header_size & 0xFF;
1375 } else {
1376 len = 6;
1377
1378 pa0_buff[0] = 0xFF;
1379 pa0_buff[1] = 0xFE;
1380 pa0_buff[2] = (header_size >> 24) & 0xFF;
1381 pa0_buff[3] = (header_size >> 16) & 0xFF;
1382 pa0_buff[4] = (header_size >> 8) & 0xFF;
1383 pa0_buff[5] = header_size & 0xFF;
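/* Worked example (editor's illustration): for assoclen = 24 the a0 block
 * starts with the two length octets 0x00 0x18 and the function returns 2;
 * the 6-byte form 0xff 0xfe <32-bit length> is only used once
 * l(a) >= 2^16 - 2^8 (RFC 3610).
 */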
1389 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1390 {
1391 __be32 data;
1392
1393 memset(block, 0, csize);
1394 block += csize;
1395
1396 if (csize >= 4)
1397 csize = 4;
1398 else if (msglen > (1 << (8 * csize)))
1399 return -EOVERFLOW;
1400
1401 data = cpu_to_be32(msglen);
1402 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
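/* set_msg_len() stores msglen big-endian in the trailing csize bytes of
 * the block - the l(m) field of B0. E.g. csize = 4 and msglen = 258 yield
 * 00 00 01 02 in the last four bytes (editor's illustration).
 */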
1407 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1408 unsigned int *seq_size)
1410 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1411 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1412 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1413 unsigned int idx = *seq_size;
1414 unsigned int cipher_flow_mode;
1415 dma_addr_t mac_result;
1417 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1418 cipher_flow_mode = AES_to_HASH_and_DOUT;
1419 mac_result = req_ctx->mac_buf_dma_addr;
1420 } else { /* Encrypt */
1421 cipher_flow_mode = AES_and_HASH;
1422 mac_result = req_ctx->icv_dma_addr;
1426 hw_desc_init(&desc[idx]);
1427 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1428 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1429 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1430 ctx->enc_keylen), NS_BIT);
1431 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1432 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1433 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1434 set_flow_mode(&desc[idx], S_DIN_to_AES);
1437 /* load ctr state */
1438 hw_desc_init(&desc[idx]);
1439 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1440 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1441 set_din_type(&desc[idx], DMA_DLLI,
1442 req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1443 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1444 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1445 set_flow_mode(&desc[idx], S_DIN_to_AES);
1449 hw_desc_init(&desc[idx]);
1450 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1451 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1452 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1453 ctx->enc_keylen), NS_BIT);
1454 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1455 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1456 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1457 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1458 set_aes_not_hash_mode(&desc[idx]);
1461 /* load MAC state */
1462 hw_desc_init(&desc[idx]);
1463 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1464 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1465 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1466 AES_BLOCK_SIZE, NS_BIT);
1467 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1468 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1469 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1470 set_aes_not_hash_mode(&desc[idx]);
1473 /* process assoc data */
1474 if (req->assoclen > 0) {
1475 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1477 hw_desc_init(&desc[idx]);
1478 set_din_type(&desc[idx], DMA_DLLI,
1479 sg_dma_address(&req_ctx->ccm_adata_sg),
1480 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1481 set_flow_mode(&desc[idx], DIN_HASH);
1485 /* process the cipher */
1486 if (req_ctx->cryptlen)
1487 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1489 /* Read intermediate MAC result */
1490 hw_desc_init(&desc[idx]);
1491 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1492 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1494 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1495 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1496 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1497 set_aes_not_hash_mode(&desc[idx]);
1500 /* load AES-CTR state (for last MAC calculation)*/
1501 hw_desc_init(&desc[idx]);
1502 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1503 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1504 set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1505 AES_BLOCK_SIZE, NS_BIT);
1506 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1507 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1508 set_flow_mode(&desc[idx], S_DIN_to_AES);
1511 hw_desc_init(&desc[idx]);
1512 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1513 set_dout_no_dma(&desc[idx], 0, 0, 1);
1516 /* encrypt the "T" value and store MAC in mac_state */
1517 hw_desc_init(&desc[idx]);
1518 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1519 ctx->authsize, NS_BIT);
1520 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1521 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1522 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1529 static int config_ccm_adata(struct aead_request *req)
1531 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1532 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1533 struct device *dev = drvdata_to_dev(ctx->drvdata);
1534 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1536 unsigned int lp = req->iv[0];
1537 /* Note: The code assumes that req->iv[0] already contains the value
1538 * of L' of RFC 3610.
1539 */
1540 unsigned int l = lp + 1; /* This is L of RFC 3610. */
1541 unsigned int m = ctx->authsize; /* This is M of RFC 3610 (tag length). */
1542 u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1543 u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1544 u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1545 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1546 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1548 (req->cryptlen - ctx->authsize);
1551 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1552 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1554 /* taken from crypto/ccm.c */
1555 /* 2 <= L <= 8, so 1 <= L' <= 7. */
1556 if (l < 2 || l > 8) {
1557 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1560 memcpy(b0, req->iv, AES_BLOCK_SIZE);
1562 /* format control info per RFC 3610 and
1563 * NIST Special Publication 800-38C
1565 *b0 |= (8 * ((m - 2) / 2));
1566 if (req->assoclen > 0)
1567 *b0 |= 64; /* Enable bit 6 if Adata exists. */
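/* Flags-byte recap (RFC 3610): flags = 64*Adata + 8*M' + L', where
 * M' = (M - 2) / 2 and L' = L - 1 is already present in req->iv[0].
 * E.g. M = 16, L = 4, AAD present: 64 + 8 * 7 + 3 = 0x7b (editor's
 * illustration).
 */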
1569 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write l(m), the message length. */
1571 dev_err(dev, "message len overflow detected");
1574 /* END of "taken from crypto/ccm.c" */
1576 /* l(a) - size of associated data. */
1577 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1579 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1580 req->iv[15] = 1;
1581
1582 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1583 ctr_count_0[15] = 0;
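/* ctr_count_0 is now A0 (counter = 0), used to encrypt the CBC-MAC value
 * into the final tag, while req->iv holds A1 (counter = 1), where payload
 * encryption starts (RFC 3610).
 */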
1588 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1590 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1591 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1592 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1595 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1596 /* For RFC 4309, always use 4 bytes for message length
1597 * (at most 2^32-1 bytes).
1599 areq_ctx->ctr_iv[0] = 3;
1601 /* In RFC 4309 there is an 11-byte nonce+IV part,
1602 * that we build here.
1604 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1605 CCM_BLOCK_NONCE_SIZE);
1606 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1607 CCM_BLOCK_IV_SIZE);
1608 req->iv = areq_ctx->ctr_iv;
1609 req->assoclen -= CCM_BLOCK_IV_SIZE;
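/* Resulting counter-block layout (RFC 4309): ctr_iv[0] = L' = 3, a 3-byte
 * salt from setkey, the 8-byte per-packet IV, and a 4-byte counter field -
 * 16 bytes in total. The 8 IV bytes are also dropped from assoclen since
 * they arrive appended to the AAD.
 */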
1612 static void cc_set_ghash_desc(struct aead_request *req,
1613 struct cc_hw_desc desc[], unsigned int *seq_size)
1615 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1616 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1617 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1618 unsigned int idx = *seq_size;
1620 /* Load the key to AES */
1621 hw_desc_init(&desc[idx]);
1622 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1623 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1624 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1625 ctx->enc_keylen, NS_BIT);
1626 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1627 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1628 set_flow_mode(&desc[idx], S_DIN_to_AES);
1631 /* process one zero block to generate hkey */
1632 hw_desc_init(&desc[idx]);
1633 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1634 set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1636 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1639 /* Memory Barrier */
1640 hw_desc_init(&desc[idx]);
1641 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1642 set_dout_no_dma(&desc[idx], 0, 0, 1);
1645 /* Load GHASH subkey */
1646 hw_desc_init(&desc[idx]);
1647 set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1648 AES_BLOCK_SIZE, NS_BIT);
1649 set_dout_no_dma(&desc[idx], 0, 0, 1);
1650 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1651 set_aes_not_hash_mode(&desc[idx]);
1652 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1653 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1654 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1657 /* Configure the hash engine to work with GHASH.
1658 * Since it was not possible to extend the HASH submodes to add GHASH,
1659 * the following command is necessary in order to select GHASH
1660 * (according to the HW designers).
1661 */
1662 hw_desc_init(&desc[idx]);
1663 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1664 set_dout_no_dma(&desc[idx], 0, 0, 1);
1665 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1666 set_aes_not_hash_mode(&desc[idx]);
1667 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1668 set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1669 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1670 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1671 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1674 /* Load GHASH initial STATE (which is 0). (For any hash there is an
1675 * initial state.)
1676 */
1677 hw_desc_init(&desc[idx]);
1678 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1679 set_dout_no_dma(&desc[idx], 0, 0, 1);
1680 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1681 set_aes_not_hash_mode(&desc[idx]);
1682 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1683 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1684 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
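/* GHASH recap (NIST SP 800-38D): the subkey H = AES-K(0^128) generated
 * above is loaded into the hash engine, which then multiplies each
 * 16-byte block of AAD/ciphertext by H in GF(2^128).
 */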
1690 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1691 unsigned int *seq_size)
1693 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1694 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1695 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1696 unsigned int idx = *seq_size;
1698 /* Load the key to AES */
1699 hw_desc_init(&desc[idx]);
1700 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1701 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1702 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1703 ctx->enc_keylen, NS_BIT);
1704 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1705 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1706 set_flow_mode(&desc[idx], S_DIN_to_AES);
1709 if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1710 /* Load the AES/CTR initial CTR value, incremented by 2 */
1711 hw_desc_init(&desc[idx]);
1712 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1713 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1714 set_din_type(&desc[idx], DMA_DLLI,
1715 req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1717 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1718 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1719 set_flow_mode(&desc[idx], S_DIN_to_AES);
1726 static void cc_proc_gcm_result(struct aead_request *req,
1727 struct cc_hw_desc desc[],
1728 unsigned int *seq_size)
1730 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1731 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1732 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1733 dma_addr_t mac_result;
1734 unsigned int idx = *seq_size;
1736 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1737 mac_result = req_ctx->mac_buf_dma_addr;
1738 } else { /* Encrypt */
1739 mac_result = req_ctx->icv_dma_addr;
1742 /* process(ghash) gcm_block_len */
1743 hw_desc_init(&desc[idx]);
1744 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1745 AES_BLOCK_SIZE, NS_BIT);
1746 set_flow_mode(&desc[idx], DIN_HASH);
1749 /* Store GHASH state after GHASH(Associated Data + Cipher + LenBlock) */
1750 hw_desc_init(&desc[idx]);
1751 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1752 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1753 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1755 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1756 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1757 set_aes_not_hash_mode(&desc[idx]);
1761 /* Load the AES/CTR initial CTR value, incremented by 1 */
1762 hw_desc_init(&desc[idx]);
1763 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1764 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1765 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1766 AES_BLOCK_SIZE, NS_BIT);
1767 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1768 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1769 set_flow_mode(&desc[idx], S_DIN_to_AES);
1772 /* Memory Barrier */
1773 hw_desc_init(&desc[idx]);
1774 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1775 set_dout_no_dma(&desc[idx], 0, 0, 1);
1778 /* Process GCTR on the stored GHASH and store the MAC in mac_state */
1779 hw_desc_init(&desc[idx]);
1780 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1781 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1782 AES_BLOCK_SIZE, NS_BIT);
1783 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1784 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1785 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1791 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1792 unsigned int *seq_size)
1794 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1795 unsigned int cipher_flow_mode;
1797 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1798 cipher_flow_mode = AES_and_HASH;
1799 } else { /* Encrypt */
1800 cipher_flow_mode = AES_to_HASH_and_DOUT;
1803 // In RFC 4543 there is no data to encrypt; just copy data from src to dst.
1804 if (req_ctx->plaintext_authenticate_only) {
1805 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1806 cc_set_ghash_desc(req, desc, seq_size);
1807 /* process(ghash) assoc data */
1808 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1809 cc_set_gctr_desc(req, desc, seq_size);
1810 cc_proc_gcm_result(req, desc, seq_size);
1814 // For GCM and RFC 4106.
1815 cc_set_ghash_desc(req, desc, seq_size);
1816 /* process(ghash) assoc data */
1817 if (req->assoclen > 0)
1818 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1819 cc_set_gctr_desc(req, desc, seq_size);
1820 /* process(gctr+ghash) */
1821 if (req_ctx->cryptlen)
1822 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1823 cc_proc_gcm_result(req, desc, seq_size);
1828 static int config_gcm_context(struct aead_request *req)
1830 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1831 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1832 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1833 struct device *dev = drvdata_to_dev(ctx->drvdata);
1835 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1836 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1838 (req->cryptlen - ctx->authsize);
1839 __be32 counter = cpu_to_be32(2);
1841 dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
1842 __func__, cryptlen, req->assoclen, ctx->authsize);
1844 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1846 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1848 memcpy(req->iv + 12, &counter, 4);
1849 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1851 counter = cpu_to_be32(1);
1852 memcpy(req->iv + 12, &counter, 4);
1853 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1855 if (!req_ctx->plaintext_authenticate_only) {
1858 temp64 = cpu_to_be64(req->assoclen * 8);
1859 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1860 temp64 = cpu_to_be64(cryptlen * 8);
1861 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1863 /* rfc4543 => all data (AAD, IV, plaintext) is considered additional
1864 * authenticated data; nothing is encrypted.
1865 */
1868 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1870 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871 temp64 = 0;
1872 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
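/* GCM counter recap (NIST SP 800-38D): with a 96-bit IV, J0 = IV||0^31||1.
 * gcm_iv_inc1 (counter = 1) is J0 itself, used by GCTR to encrypt the
 * final GHASH value into the tag; gcm_iv_inc2 (counter = 2) is inc32(J0),
 * where payload encryption starts.
 */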
1878 static void cc_proc_rfc4_gcm(struct aead_request *req)
1880 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1884 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887 GCM_BLOCK_RFC4_IV_SIZE);
1888 req->iv = areq_ctx->ctr_iv;
1889 req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
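/* The resulting 12-byte GCM IV is the 4-byte salt from setkey followed by
 * the 8-byte per-packet IV (RFC 4106); the IV bytes are also dropped from
 * assoclen since they arrive appended to the AAD.
 */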
1892 static int cc_proc_aead(struct aead_request *req,
1893 enum drv_crypto_direction direct)
1897 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1898 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1899 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1900 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1901 struct device *dev = drvdata_to_dev(ctx->drvdata);
1902 struct cc_crypto_req cc_req = {};
1904 dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1905 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1906 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1907 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1909 /* STAT_PHASE_0: Init and sanity checks */
1911 /* Check data length according to mode */
1912 if (validate_data_size(ctx, direct, req)) {
1913 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1914 req->cryptlen, req->assoclen);
1915 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1919 /* Setup request structure */
1920 cc_req.user_cb = (void *)cc_aead_complete;
1921 cc_req.user_arg = (void *)req;
1923 /* Setup request context */
1924 areq_ctx->gen_ctx.op_type = direct;
1925 areq_ctx->req_authsize = ctx->authsize;
1926 areq_ctx->cipher_mode = ctx->cipher_mode;
1928 /* STAT_PHASE_1: Map buffers */
1930 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1931 /* Build CTR IV - Copy nonce from last 4 bytes in
1932 * CTR key to first 4 bytes in CTR IV
1934 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1935 CTR_RFC3686_NONCE_SIZE);
1936 if (!areq_ctx->backup_giv) /*User none-generated IV*/
1937 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1938 req->iv, CTR_RFC3686_IV_SIZE);
1939 /* Initialize counter portion of counter block */
1940 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1941 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
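		/*
		 * The resulting 16-byte counter block is laid out as
		 * nonce[4] || IV[8] || BE32(1); RFC 3686 requires the
		 * keystream to start with block counter value 1.
		 */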
		/* Replace with counter iv */
		req->iv = areq_ctx->ctr_iv;
		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
		if (areq_ctx->ctr_iv != req->iv) {
			memcpy(areq_ctx->ctr_iv, req->iv,
			       crypto_aead_ivsize(tfm));
			req->iv = areq_ctx->ctr_iv;
		}
	} else {
		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
	}

	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
		rc = config_ccm_adata(req);
		if (rc) {
			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
				rc);
			goto exit;
		}
	} else {
		areq_ctx->ccm_hdr_size = ccm_header_size_null;
	}

	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
		rc = config_gcm_context(req);
		if (rc) {
			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
				rc);
			goto exit;
		}
	}

	rc = cc_map_aead_request(ctx->drvdata, req);
	if (rc) {
		dev_err(dev, "map_request() failed\n");
		goto exit;
	}

	/* Do we need to generate an IV? */
	if (areq_ctx->backup_giv) {
		/* Set the DMA mapped IV address */
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CTR_RFC3686_NONCE_SIZE;
			cc_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In CCM, the IV needs to exist both inside B0 and
			 * inside the counter. It is also copied to iv_dma_addr
			 * for other reasons (like returning it to the user).
			 * So, use 3 (identical) IV outputs.
			 */
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[1] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[2] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr_len = 3;
		} else {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr;
			cc_req.ivgen_dma_addr_len = 1;
		}

		/* Set the IV size (8/16 B long) */
		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	/* STAT_PHASE_2: Create sequence */

	/* Load MLLI tables to SRAM if necessary */
	cc_mlli_to_sram(req, desc, &seq_len);

	/* TODO: move seq len by reference */
	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		cc_hmac_authenc(req, desc, &seq_len);
		break;
	case DRV_HASH_XCBC_MAC:
		cc_xcbc_authenc(req, desc, &seq_len);
		break;
	case DRV_HASH_NULL:
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			cc_ccm(req, desc, &seq_len);
		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
			cc_gcm(req, desc, &seq_len);
		break;
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		cc_unmap_aead_request(dev, req);
		rc = -ENOTSUPP;
		goto exit;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);

	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_aead_request(dev, req);
	}

exit:
	return rc;
}
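
/*
 * For the entry points below: cc_proc_aead() is asynchronous, so
 * -EINPROGRESS (and -EBUSY when the request was backlogged) means the
 * request was accepted and will complete via cc_aead_complete(). Any other
 * return value is a synchronous failure, in which case req->iv is restored
 * from backup_iv, because the mode-specific setup may have redirected it
 * to the per-request ctr_iv buffer.
 */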
static int cc_aead_encrypt(struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}
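
/*
 * The rfc4309 and rfc4106 entry points accept only assoclen values of 16
 * or 20 bytes (see valid_assoclen()), matching IPsec ESP usage: the ESP
 * header (SPI plus a 32- or 64-bit sequence number, i.e. 8 or 12 bytes)
 * plus the 8-byte explicit IV that these templates count as part of the
 * associated data.
 */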
static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = true;

	cc_proc_rfc4309_ccm(req);

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
static int cc_aead_decrypt(struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}
static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;

	areq_ctx->is_gcm4543 = true;
	cc_proc_rfc4309_ccm(req);

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->ctr_nonce, key + keylen, 4);

	return cc_aead_setkey(tfm, key, keylen);
}
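
/*
 * rfc4106/rfc4543 key layout: the last 4 bytes of the supplied key
 * material are the nonce (salt), not AES key bytes. For example, a
 * 20-byte key splits into a 16-byte AES key passed on to cc_aead_setkey()
 * and a 4-byte salt stored in ctx->ctr_nonce.
 */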
static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->ctr_nonce, key + keylen, 4);

	return cc_aead_setkey(tfm, key, keylen);
}
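
/*
 * Allowed ICV sizes differ per specification: plain GCM (NIST SP 800-38D)
 * permits truncation to 4, 8, or 12..16 bytes, RFC 4106 allows only 8, 12,
 * or 16 bytes, and RFC 4543 mandates the full 16-byte tag.
 */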
static int cc_gcm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	/* Full 16-byte tag, or truncation to 4, 8, or 12..15 bytes */
	if (authsize != 4 && authsize != 8 &&
	    (authsize < 12 || authsize > 16))
		return -EINVAL;

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "authsize %d\n", authsize);

	if (authsize != 8 && authsize != 12 && authsize != 16)
		return -EINVAL;

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "authsize %d\n", authsize);

	if (authsize != 16)
		return -EINVAL;

	return cc_aead_setauthsize(authenc, authsize);
}
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	/* Plaintext is not encrypted with rfc4543 */
	areq_ctx->plaintext_authenticate_only = true;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}
static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_decrypt() above. */

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_decrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	/* Plaintext is not decrypted with rfc4543 */
	areq_ctx->plaintext_authenticate_only = true;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}
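
/*
 * The template table below is what this driver exposes through the kernel
 * crypto API. A hypothetical in-kernel user would not call this file
 * directly but would go through the generic AEAD interface, roughly:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *aead_req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_ad(aead_req, assoclen);
 *	aead_request_set_crypt(aead_req, sg_src, sg_dst, cryptlen, iv);
 *	crypto_aead_encrypt(aead_req);
 *
 * The crypto core routes such a request here when this driver's
 * registration (see cc_aead_alloc() at the end of this file) wins on
 * cra_priority.
 */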
static struct cc_alg_template aead_algs[] = {
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(xcbc(aes),cbc(aes))",
		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "ccm(aes)",
		.driver_name = "ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_ccm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4309(ccm(aes))",
		.driver_name = "rfc4309-ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4309_ccm_setkey,
			.setauthsize = cc_rfc4309_ccm_setauthsize,
			.encrypt = cc_rfc4309_ccm_encrypt,
			.decrypt = cc_rfc4309_ccm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CCM_BLOCK_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_gcm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4106_gcm_setkey,
			.setauthsize = cc_rfc4106_gcm_setauthsize,
			.encrypt = cc_rfc4106_gcm_encrypt,
			.decrypt = cc_rfc4106_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4543_gcm_setkey,
			.setauthsize = cc_rfc4543_gcm_setauthsize,
			.encrypt = cc_rfc4543_gcm_encrypt,
			.decrypt = cc_rfc4543_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};
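
/*
 * cc_create_aead_alg() below turns one of the templates above into a live
 * struct cc_crypto_alg: it copies template_aead, stamps the cra_* identity
 * fields, and records the HW routing parameters (cipher_mode, flow_mode,
 * auth_mode). cc_aead_alloc() then registers only the entries whose
 * min_hw_rev and std_body match the probed hardware.
 */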
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle =
		(struct cc_aead_handle *)drvdata->aead_handle;

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle)
		goto fail0;

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail1;
	}

	/* Register only the algs supported by this HW rev / standards body */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}

		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata);
fail0:
	return rc;
}