2 * Freescale FSL CAAM support for crypto API over QI backend.
5 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP
13 #include "desc_constr.h"
19 #include "caamalg_desc.h"
/*
 * Driver-wide constants.
 * NOTE(review): extraction dropped continuation lines here (the
 * DESC_MAX_USED_BYTES expression is visibly truncated) — confirm against
 * the pristine file.
 */
/* Registration priority for these algorithm implementations. */
24 #define CAAM_CRA_PRIORITY 2000
25 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
26 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
27 SHA512_DIGEST_SIZE * 2)
/* Worst-case shared descriptor size, sized by the largest (givenc) variant. */
29 #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
/* Same limit expressed in 32-bit CAAM command words. */
31 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
/*
 * Per-algorithm template data (class/mode bits, rfc3686/geniv flags —
 * fields not visible in this extraction).
 */
33 struct caam_alg_entry {
/* crypto_alg wrapper pairing the generic AEAD alg with CAAM specifics. */
40 struct caam_aead_alg {
42 struct caam_alg_entry caam;
/*
 * NOTE(review): the opening line of the per-tfm context struct (presumably
 * "struct caam_ctx {") was lost in extraction; the fields below belong to it.
 */
/* Prebuilt shared descriptors for each operation type. */
51 u32 sh_desc_enc[DESC_MAX_USED_LEN];
52 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
/* Key material: split auth key (padded) followed by the cipher key. */
54 u8 key[CAAM_MAX_KEY_SIZE];
/* DMA sync direction used for the key buffer. */
56 enum dma_data_direction dir;
59 unsigned int authsize;
61 spinlock_t lock; /* Protects multiple init of driver context */
/* Lazily-initialized QI driver contexts, one per operation type. */
62 struct caam_drv_ctx *drv_ctx[NUM_OP];
/*
 * aead_set_sh_desc() - (re)construct the encrypt, decrypt and (for geniv
 * algorithms) givencrypt shared descriptors for an authenc-style AEAD tfm.
 * For each descriptor, desc_inline_query() decides whether the auth and/or
 * cipher keys fit inline in the descriptor or must be referenced by DMA
 * address; bit 0 of inl_mask = auth key, bit 1 = cipher key.
 * NOTE(review): extraction dropped interior lines (returns, braces,
 * -ENOMEM paths) — code below kept byte-identical to what is visible.
 */
65 static int aead_set_sh_desc(struct crypto_aead *aead)
67 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
69 struct caam_ctx *ctx = crypto_aead_ctx(aead);
70 unsigned int ivsize = crypto_aead_ivsize(aead);
73 unsigned int data_len[2];
75 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
76 OP_ALG_AAI_CTR_MOD128);
77 const bool is_rfc3686 = alg->caam.rfc3686;
78 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
/* Nothing to build until both keys and authsize have been set. */
80 if (!ctx->cdata.keylen || !ctx->authsize)
84 * AES-CTR needs to load IV in CONTEXT1 reg
85 * at an offset of 128bits (16bytes)
86 * CONTEXT1[255:128] = IV
93 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
/* RFC3686: IV sits after the nonce; nonce is stored at the end of the key. */
96 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
97 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
98 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
/* data_len[0] = auth (split) key length, data_len[1] = cipher key length. */
101 data_len[0] = ctx->adata.keylen_pad;
102 data_len[1] = ctx->cdata.keylen;
107 /* aead_encrypt shared descriptor */
108 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
109 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
110 DESC_JOB_IO_LEN, data_len, &inl_mask,
111 ARRAY_SIZE(data_len)) < 0)
/* Auth key lives at the start of ctx->key, cipher key right after it. */
115 ctx->adata.key_virt = ctx->key;
117 ctx->adata.key_dma = ctx->key_dma;
120 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
122 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
124 ctx->adata.key_inline = !!(inl_mask & 1);
125 ctx->cdata.key_inline = !!(inl_mask & 2);
127 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
128 ivsize, ctx->authsize, is_rfc3686, nonce,
129 ctx1_iv_off, true, ctrlpriv->era);
132 /* aead_decrypt shared descriptor */
133 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
134 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
135 DESC_JOB_IO_LEN, data_len, &inl_mask,
136 ARRAY_SIZE(data_len)) < 0)
140 ctx->adata.key_virt = ctx->key;
142 ctx->adata.key_dma = ctx->key_dma;
145 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
147 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
149 ctx->adata.key_inline = !!(inl_mask & 1);
150 ctx->cdata.key_inline = !!(inl_mask & 2);
152 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
153 ivsize, ctx->authsize, alg->caam.geniv,
154 is_rfc3686, nonce, ctx1_iv_off, true,
/* Givencrypt descriptor only needed for IV-generating algorithms. */
157 if (!alg->caam.geniv)
160 /* aead_givencrypt shared descriptor */
161 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
162 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
163 DESC_JOB_IO_LEN, data_len, &inl_mask,
164 ARRAY_SIZE(data_len)) < 0)
168 ctx->adata.key_virt = ctx->key;
170 ctx->adata.key_dma = ctx->key_dma;
173 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
175 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
177 ctx->adata.key_inline = !!(inl_mask & 1);
178 ctx->cdata.key_inline = !!(inl_mask & 2);
/* Note: givencap output reuses sh_desc_enc, replacing the plain encap desc. */
180 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
181 ivsize, ctx->authsize, is_rfc3686, nonce,
182 ctx1_iv_off, true, ctrlpriv->era);
/*
 * aead_setauthsize() - record the requested ICV length and rebuild the
 * shared descriptors, which embed the authsize.
 * NOTE(review): the return value of aead_set_sh_desc() is not checked
 * here as far as the visible lines show — confirm against pristine file.
 */
188 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
190 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
192 ctx->authsize = authsize;
193 aead_set_sh_desc(authenc);
/*
 * aead_setkey() - split the authenc-format key blob into auth + cipher
 * keys, derive/store them in ctx->key, rebuild the shared descriptors and
 * push them into any already-created QI driver contexts.
 * On CAAM era >= 6 the split key is produced on the fly by DKP inside the
 * descriptor, so the raw auth key is stored; otherwise gen_split_key()
 * precomputes the split key on a job ring.
 * NOTE(review): extraction dropped error-path lines (goto badkey, returns).
 */
198 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
201 struct caam_ctx *ctx = crypto_aead_ctx(aead);
202 struct device *jrdev = ctx->jrdev;
203 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
204 struct crypto_authenc_keys keys;
/* Reject malformed authenc() key blobs before touching ctx state. */
207 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
211 dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
212 keys.authkeylen + keys.enckeylen, keys.enckeylen,
214 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
215 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
219 * If DKP is supported, use it in the shared descriptor to generate
222 if (ctrlpriv->era >= 6) {
223 ctx->adata.keylen = keys.authkeylen;
224 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
/* Combined padded-auth + cipher key must fit the ctx->key buffer. */
227 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
230 memcpy(ctx->key, keys.authkey, keys.authkeylen);
231 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
/* Make the CPU-written key visible to the device. */
233 dma_sync_single_for_device(jrdev, ctx->key_dma,
234 ctx->adata.keylen_pad +
235 keys.enckeylen, ctx->dir);
/* Pre-era-6 path: compute the split (HMAC ipad/opad) key via a job. */
239 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
240 keys.authkeylen, CAAM_MAX_KEY_SIZE -
245 /* postpend encryption key to auth split key */
246 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
247 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
248 keys.enckeylen, ctx->dir);
250 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
251 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
252 ctx->adata.keylen_pad + keys.enckeylen, 1);
256 ctx->cdata.keylen = keys.enckeylen;
258 ret = aead_set_sh_desc(aead);
262 /* Now update the driver contexts with the new shared descriptor */
263 if (ctx->drv_ctx[ENCRYPT]) {
264 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
267 dev_err(jrdev, "driver enc context update failed\n");
272 if (ctx->drv_ctx[DECRYPT]) {
273 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
276 dev_err(jrdev, "driver dec context update failed\n");
/* Scrub the plaintext key material from the stack on both exit paths. */
281 memzero_explicit(&keys, sizeof(keys));
284 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
285 memzero_explicit(&keys, sizeof(keys));
/*
 * gcm_set_sh_desc() - build GCM encrypt/decrypt shared descriptors,
 * inlining the key only when the shared + job descriptor pair still fits
 * the 64-word descriptor buffer.
 * NOTE(review): extraction dropped returns/braces — code kept as visible.
 */
289 static int gcm_set_sh_desc(struct crypto_aead *aead)
291 struct caam_ctx *ctx = crypto_aead_ctx(aead);
292 unsigned int ivsize = crypto_aead_ivsize(aead);
/* Bytes left for the shared descriptor once the job descriptor is counted. */
293 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
296 if (!ctx->cdata.keylen || !ctx->authsize)
300 * Job Descriptor and Shared Descriptor
301 * must fit into the 64-word Descriptor h/w Buffer
303 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
304 ctx->cdata.key_inline = true;
305 ctx->cdata.key_virt = ctx->key;
307 ctx->cdata.key_inline = false;
308 ctx->cdata.key_dma = ctx->key_dma;
311 cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
312 ctx->authsize, true);
315 * Job Descriptor and Shared Descriptor
316 * must fit into the 64-word Descriptor h/w Buffer
318 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
319 ctx->cdata.key_inline = true;
320 ctx->cdata.key_virt = ctx->key;
322 ctx->cdata.key_inline = false;
323 ctx->cdata.key_dma = ctx->key_dma;
326 cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
327 ctx->authsize, true);
/*
 * gcm_setauthsize() - store the tag length and rebuild the GCM shared
 * descriptors (authsize is baked into them).
 * NOTE(review): gcm_set_sh_desc() return value not checked in the visible
 * lines — confirm against pristine file.
 */
332 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
334 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
336 ctx->authsize = authsize;
337 gcm_set_sh_desc(authenc);
/*
 * gcm_setkey() - copy the AES key into the DMA-mapped ctx buffer, rebuild
 * the shared descriptors, and refresh any live QI driver contexts.
 */
342 static int gcm_setkey(struct crypto_aead *aead,
343 const u8 *key, unsigned int keylen)
345 struct caam_ctx *ctx = crypto_aead_ctx(aead);
346 struct device *jrdev = ctx->jrdev;
350 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
351 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
354 memcpy(ctx->key, key, keylen);
/* Flush the CPU-written key to the device-visible buffer. */
355 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
356 ctx->cdata.keylen = keylen;
358 ret = gcm_set_sh_desc(aead);
362 /* Now update the driver contexts with the new shared descriptor */
363 if (ctx->drv_ctx[ENCRYPT]) {
364 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
367 dev_err(jrdev, "driver enc context update failed\n");
372 if (ctx->drv_ctx[DECRYPT]) {
373 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
376 dev_err(jrdev, "driver dec context update failed\n");
/*
 * rfc4106_set_sh_desc() - build GCM-in-IPsec (RFC4106) encrypt/decrypt
 * shared descriptors; key is inlined only if the descriptor still fits
 * the 64-word buffer.
 */
384 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
386 struct caam_ctx *ctx = crypto_aead_ctx(aead);
387 unsigned int ivsize = crypto_aead_ivsize(aead);
388 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
391 if (!ctx->cdata.keylen || !ctx->authsize)
394 ctx->cdata.key_virt = ctx->key;
397 * Job Descriptor and Shared Descriptor
398 * must fit into the 64-word Descriptor h/w Buffer
400 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
401 ctx->cdata.key_inline = true;
403 ctx->cdata.key_inline = false;
404 ctx->cdata.key_dma = ctx->key_dma;
407 cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
408 ctx->authsize, true);
411 * Job Descriptor and Shared Descriptor
412 * must fit into the 64-word Descriptor h/w Buffer
414 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
415 ctx->cdata.key_inline = true;
417 ctx->cdata.key_inline = false;
418 ctx->cdata.key_dma = ctx->key_dma;
421 cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
422 ctx->authsize, true);
/*
 * rfc4106_setauthsize() - store ICV length and rebuild RFC4106 descriptors.
 * NOTE(review): return value of rfc4106_set_sh_desc() not checked in the
 * visible lines — confirm against pristine file.
 */
427 static int rfc4106_setauthsize(struct crypto_aead *authenc,
428 unsigned int authsize)
430 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
432 ctx->authsize = authsize;
433 rfc4106_set_sh_desc(authenc);
/*
 * rfc4106_setkey() - store the key material; the trailing 4 bytes are the
 * nonce salt, so only keylen - 4 bytes count as the AES key proper.
 */
438 static int rfc4106_setkey(struct crypto_aead *aead,
439 const u8 *key, unsigned int keylen)
441 struct caam_ctx *ctx = crypto_aead_ctx(aead);
442 struct device *jrdev = ctx->jrdev;
449 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
450 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/* Full blob (key + salt) is copied; salt stays adjacent to the key. */
453 memcpy(ctx->key, key, keylen);
455 * The last four bytes of the key material are used as the salt value
456 * in the nonce. Update the AES key length.
458 ctx->cdata.keylen = keylen - 4;
459 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
462 ret = rfc4106_set_sh_desc(aead);
466 /* Now update the driver contexts with the new shared descriptor */
467 if (ctx->drv_ctx[ENCRYPT]) {
468 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
471 dev_err(jrdev, "driver enc context update failed\n");
476 if (ctx->drv_ctx[DECRYPT]) {
477 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
480 dev_err(jrdev, "driver dec context update failed\n");
/*
 * rfc4543_set_sh_desc() - build GMAC-in-IPsec (RFC4543) encrypt/decrypt
 * shared descriptors; same inline-vs-DMA key decision as the GCM variants.
 */
488 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
490 struct caam_ctx *ctx = crypto_aead_ctx(aead);
491 unsigned int ivsize = crypto_aead_ivsize(aead);
492 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
495 if (!ctx->cdata.keylen || !ctx->authsize)
498 ctx->cdata.key_virt = ctx->key;
501 * Job Descriptor and Shared Descriptor
502 * must fit into the 64-word Descriptor h/w Buffer
504 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
505 ctx->cdata.key_inline = true;
507 ctx->cdata.key_inline = false;
508 ctx->cdata.key_dma = ctx->key_dma;
511 cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
512 ctx->authsize, true);
515 * Job Descriptor and Shared Descriptor
516 * must fit into the 64-word Descriptor h/w Buffer
518 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
519 ctx->cdata.key_inline = true;
521 ctx->cdata.key_inline = false;
522 ctx->cdata.key_dma = ctx->key_dma;
525 cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
526 ctx->authsize, true);
/*
 * rfc4543_setauthsize() - store ICV length and rebuild RFC4543 descriptors.
 * NOTE(review): return value of rfc4543_set_sh_desc() not checked in the
 * visible lines — confirm against pristine file.
 */
531 static int rfc4543_setauthsize(struct crypto_aead *authenc,
532 unsigned int authsize)
534 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
536 ctx->authsize = authsize;
537 rfc4543_set_sh_desc(authenc);
/*
 * rfc4543_setkey() - same scheme as rfc4106_setkey(): the final 4 bytes
 * of the blob are the nonce salt, the rest is the AES key.
 */
542 static int rfc4543_setkey(struct crypto_aead *aead,
543 const u8 *key, unsigned int keylen)
545 struct caam_ctx *ctx = crypto_aead_ctx(aead);
546 struct device *jrdev = ctx->jrdev;
553 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
554 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
557 memcpy(ctx->key, key, keylen);
559 * The last four bytes of the key material are used as the salt value
560 * in the nonce. Update the AES key length.
562 ctx->cdata.keylen = keylen - 4;
563 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
566 ret = rfc4543_set_sh_desc(aead);
570 /* Now update the driver contexts with the new shared descriptor */
571 if (ctx->drv_ctx[ENCRYPT]) {
572 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
575 dev_err(jrdev, "driver enc context update failed\n");
580 if (ctx->drv_ctx[DECRYPT]) {
581 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
584 dev_err(jrdev, "driver dec context update failed\n");
/*
 * ablkcipher_setkey() - set the cipher key and rebuild the three
 * ablkcipher shared descriptors (encrypt, decrypt, givencrypt).
 * For rfc3686(ctr(aes)) the last CTR_RFC3686_NONCE_SIZE bytes of the blob
 * are the nonce, and the IV is loaded at CONTEXT1 offset 16 + nonce size.
 * The key is referenced directly (key_virt = key, inline) rather than
 * copied into ctx->key.
 */
592 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
593 const u8 *key, unsigned int keylen)
595 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
596 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
597 const char *alg_name = crypto_tfm_alg_name(tfm);
598 struct device *jrdev = ctx->jrdev;
599 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
601 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
602 OP_ALG_AAI_CTR_MOD128);
/* rfc3686 is detected by algorithm name, gated on CTR mode. */
603 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
607 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
608 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
611 * AES-CTR needs to load IV in CONTEXT1 reg
612 * at an offset of 128bits (16bytes)
613 * CONTEXT1[255:128] = IV
620 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
621 * | *key = {KEY, NONCE}
624 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
/* Strip the nonce from the effective key length. */
625 keylen -= CTR_RFC3686_NONCE_SIZE;
628 ctx->cdata.keylen = keylen;
629 ctx->cdata.key_virt = key;
630 ctx->cdata.key_inline = true;
632 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
633 cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
634 is_rfc3686, ctx1_iv_off);
635 cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
636 is_rfc3686, ctx1_iv_off);
637 cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
638 ivsize, is_rfc3686, ctx1_iv_off);
640 /* Now update the driver contexts with the new shared descriptor */
641 if (ctx->drv_ctx[ENCRYPT]) {
642 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
645 dev_err(jrdev, "driver enc context update failed\n");
650 if (ctx->drv_ctx[DECRYPT]) {
651 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
654 dev_err(jrdev, "driver dec context update failed\n");
659 if (ctx->drv_ctx[GIVENCRYPT]) {
660 ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
661 ctx->sh_desc_givenc);
663 dev_err(jrdev, "driver givenc context update failed\n");
/* Error path: flag the bad key length back to the crypto API. */
670 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * xts_ablkcipher_setkey() - validate and set the XTS key (two AES keys
 * concatenated, so only 2*16 or 2*32 bytes are accepted) and rebuild the
 * XTS encrypt/decrypt shared descriptors.
 */
674 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
675 const u8 *key, unsigned int keylen)
677 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
678 struct device *jrdev = ctx->jrdev;
681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
682 crypto_ablkcipher_set_flags(ablkcipher,
683 CRYPTO_TFM_RES_BAD_KEY_LEN);
684 dev_err(jrdev, "key size mismatch\n");
688 ctx->cdata.keylen = keylen;
689 ctx->cdata.key_virt = key;
690 ctx->cdata.key_inline = true;
692 /* xts ablkcipher encrypt, decrypt shared descriptors */
693 cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
694 cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
696 /* Now update the driver contexts with the new shared descriptor */
697 if (ctx->drv_ctx[ENCRYPT]) {
698 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
701 dev_err(jrdev, "driver enc context update failed\n");
706 if (ctx->drv_ctx[DECRYPT]) {
707 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
710 dev_err(jrdev, "driver dec context update failed\n");
717 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * NOTE(review): the "struct aead_edesc {" opener and several field lines
 * were lost in extraction; kdoc header and remaining fields kept as-is.
 */
722 * aead_edesc - s/w-extended aead descriptor
723 * @src_nents: number of segments in input scatterlist
724 * @dst_nents: number of segments in output scatterlist
725 * @iv_dma: dma address of iv for checking continuity and link table
726 * @qm_sg_bytes: length of dma mapped h/w link table
727 * @qm_sg_dma: bus physical mapped address of h/w link table
728 * @assoclen: associated data length, in CAAM endianness
729 * @assoclen_dma: bus physical mapped address of req->assoclen
730 * @drv_req: driver-specific request structure
731 * @sgt: the h/w link table, followed by IV
738 dma_addr_t qm_sg_dma;
739 unsigned int assoclen;
740 dma_addr_t assoclen_dma;
741 struct caam_drv_req drv_req;
742 struct qm_sg_entry sgt[0];
/*
 * NOTE(review): several field lines dropped by extraction; kdoc and
 * remaining fields kept byte-identical.
 */
746 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
747 * @src_nents: number of segments in input scatterlist
748 * @dst_nents: number of segments in output scatterlist
749 * @iv_dma: dma address of iv for checking continuity and link table
750 * @qm_sg_bytes: length of dma mapped h/w link table
751 * @qm_sg_dma: bus physical mapped address of h/w link table
752 * @drv_req: driver-specific request structure
753 * @sgt: the h/w link table, followed by IV
755 struct ablkcipher_edesc {
760 dma_addr_t qm_sg_dma;
761 struct caam_drv_req drv_req;
762 struct qm_sg_entry sgt[0];
/*
 * get_drv_ctx() - return (lazily creating on first use) the QI driver
 * context for the given operation type. Creation is serialized with
 * ctx->lock and the cached pointer is re-read under the lock, so only one
 * core initializes it (double-checked-init under a spinlock).
 */
765 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
769 * This function is called on the fast path with values of 'type'
770 * known at compile time. Invalid arguments are not expected and
771 * thus no checks are made.
773 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
776 if (unlikely(!drv_ctx)) {
777 spin_lock(&ctx->lock);
779 /* Read again to check if some other core init drv_ctx */
780 drv_ctx = ctx->drv_ctx[type];
/* Pick the prebuilt shared descriptor matching the operation. */
785 desc = ctx->sh_desc_enc;
786 else if (type == DECRYPT)
787 desc = ctx->sh_desc_dec;
788 else /* (type == GIVENCRYPT) */
789 desc = ctx->sh_desc_givenc;
/* Bind the new driver context to the current CPU's response queue. */
791 cpu = smp_processor_id();
792 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
793 if (likely(!IS_ERR_OR_NULL(drv_ctx)))
794 drv_ctx->op_type = type;
796 ctx->drv_ctx[type] = drv_ctx;
799 spin_unlock(&ctx->lock);
/*
 * caam_unmap() - common DMA teardown for a completed/failed request:
 * unmap src/dst scatterlists (bidirectional when in-place), the IV buffer
 * (direction depends on whether the HW generated it), and the S/G table.
 */
805 static void caam_unmap(struct device *dev, struct scatterlist *src,
806 struct scatterlist *dst, int src_nents,
807 int dst_nents, dma_addr_t iv_dma, int ivsize,
808 enum optype op_type, dma_addr_t qm_sg_dma,
/* Distinct src/dst mappings: TO_DEVICE for input, FROM_DEVICE for output. */
813 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
814 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
/* In-place (src == dst) was mapped bidirectionally. */
816 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
820 dma_unmap_single(dev, iv_dma, ivsize,
821 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
824 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
/*
 * aead_unmap() - undo all DMA mappings of an AEAD extended descriptor,
 * including the separately-mapped 4-byte assoclen value.
 */
827 static void aead_unmap(struct device *dev,
828 struct aead_edesc *edesc,
829 struct aead_request *req)
831 struct crypto_aead *aead = crypto_aead_reqtfm(req);
832 int ivsize = crypto_aead_ivsize(aead);
834 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
835 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
836 edesc->qm_sg_dma, edesc->qm_sg_bytes);
837 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
/*
 * ablkcipher_unmap() - undo all DMA mappings of an ablkcipher extended
 * descriptor via the common caam_unmap() helper.
 */
840 static void ablkcipher_unmap(struct device *dev,
841 struct ablkcipher_edesc *edesc,
842 struct ablkcipher_request *req)
844 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
845 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
847 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
848 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
849 edesc->qm_sg_dma, edesc->qm_sg_bytes);
/*
 * aead_done() - QI completion callback for AEAD jobs. Decodes HW status
 * (mapping an ICV-check failure to -EBADMSG per crypto API convention),
 * unmaps DMA resources, completes the request and frees the edesc.
 */
852 static void aead_done(struct caam_drv_req *drv_req, u32 status)
854 struct device *qidev;
855 struct aead_edesc *edesc;
856 struct aead_request *aead_req = drv_req->app_ctx;
857 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
858 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
861 qidev = caam_ctx->qidev;
863 if (unlikely(status)) {
864 u32 ssrc = status & JRSTA_SSRC_MASK;
865 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
867 caam_jr_strstatus(qidev, status);
869 * verify hw auth check passed else return -EBADMSG
/* CCB ICV-check error means authentication failed, not a driver error. */
871 if (ssrc == JRSTA_SSRC_CCB_ERROR &&
872 err_id == JRSTA_CCBERR_ERRID_ICVCHK)
/* Recover the edesc that embeds this drv_req. */
878 edesc = container_of(drv_req, typeof(*edesc), drv_req);
879 aead_unmap(qidev, edesc, aead_req);
881 aead_request_complete(aead_req, ecode);
882 qi_cache_free(edesc);
886 * allocate and map the aead extended descriptor
/*
 * aead_edesc_alloc() - build the s/w-extended descriptor for one AEAD
 * request: count and DMA-map src/dst scatterlists, stage the IV and the
 * CAAM-endian assoclen in DMAable memory, assemble the h/w S/G (link)
 * table [assoclen, IV, src, (dst)], and fill the frame-descriptor S/G
 * entries. Returns ERR_PTR() on any failure, unwinding prior mappings.
 * NOTE(review): extraction dropped interior lines (braces, some error
 * paths); code kept byte-identical to what is visible.
 */
888 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
891 struct crypto_aead *aead = crypto_aead_reqtfm(req);
892 struct caam_ctx *ctx = crypto_aead_ctx(aead);
893 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
895 struct device *qidev = ctx->qidev;
896 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
897 GFP_KERNEL : GFP_ATOMIC;
898 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
899 struct aead_edesc *edesc;
900 dma_addr_t qm_sg_dma, iv_dma = 0;
902 unsigned int authsize = ctx->authsize;
903 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
905 struct qm_sg_entry *sg_table, *fd_sgt;
906 struct caam_drv_ctx *drv_ctx;
907 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
909 drv_ctx = get_drv_ctx(ctx, op_type);
910 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
911 return (struct aead_edesc *)drv_ctx;
913 /* allocate space for base edesc and hw desc commands, link tables */
914 edesc = qi_cache_alloc(GFP_DMA | flags);
915 if (unlikely(!edesc)) {
916 dev_err(qidev, "could not allocate extended descriptor\n");
917 return ERR_PTR(-ENOMEM);
/* In-place operation: single bidirectional mapping of the shared list. */
920 if (likely(req->src == req->dst)) {
921 src_nents = sg_nents_for_len(req->src, req->assoclen +
923 (encrypt ? authsize : 0));
924 if (unlikely(src_nents < 0)) {
925 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
926 req->assoclen + req->cryptlen +
927 (encrypt ? authsize : 0));
928 qi_cache_free(edesc);
929 return ERR_PTR(src_nents);
932 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
934 if (unlikely(!mapped_src_nents)) {
935 dev_err(qidev, "unable to map source\n");
936 qi_cache_free(edesc);
937 return ERR_PTR(-ENOMEM);
/* Out-of-place: map src and dst separately; dst grows/shrinks by ICV. */
940 src_nents = sg_nents_for_len(req->src, req->assoclen +
942 if (unlikely(src_nents < 0)) {
943 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
944 req->assoclen + req->cryptlen);
945 qi_cache_free(edesc);
946 return ERR_PTR(src_nents);
949 dst_nents = sg_nents_for_len(req->dst, req->assoclen +
951 (encrypt ? authsize :
953 if (unlikely(dst_nents < 0)) {
954 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
955 req->assoclen + req->cryptlen +
956 (encrypt ? authsize : (-authsize)));
957 qi_cache_free(edesc);
958 return ERR_PTR(dst_nents);
962 mapped_src_nents = dma_map_sg(qidev, req->src,
963 src_nents, DMA_TO_DEVICE);
964 if (unlikely(!mapped_src_nents)) {
965 dev_err(qidev, "unable to map source\n");
966 qi_cache_free(edesc);
967 return ERR_PTR(-ENOMEM);
970 mapped_src_nents = 0;
973 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
975 if (unlikely(!mapped_dst_nents)) {
976 dev_err(qidev, "unable to map destination\n");
977 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
978 qi_cache_free(edesc);
979 return ERR_PTR(-ENOMEM);
/* IV is carried in the S/G table except for non-rfc3686 geniv decrypt. */
983 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
984 ivsize = crypto_aead_ivsize(aead);
987 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
988 * Input is not contiguous.
990 qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
991 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
992 sg_table = &edesc->sgt[0];
993 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
/* edesc + S/G table + trailing IV must all fit one qi_cache slab entry. */
994 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
995 CAAM_QI_MEMCACHE_SIZE)) {
996 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
998 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1000 qi_cache_free(edesc);
1001 return ERR_PTR(-ENOMEM);
/* IV buffer lives right after the S/G table inside the edesc allocation. */
1005 u8 *iv = (u8 *)(sg_table + qm_sg_ents);
1007 /* Make sure IV is located in a DMAable area */
1008 memcpy(iv, req->iv, ivsize);
1010 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1011 if (dma_mapping_error(qidev, iv_dma)) {
1012 dev_err(qidev, "unable to map IV\n");
1013 caam_unmap(qidev, req->src, req->dst, src_nents,
1014 dst_nents, 0, 0, 0, 0, 0);
1015 qi_cache_free(edesc);
1016 return ERR_PTR(-ENOMEM);
1020 edesc->src_nents = src_nents;
1021 edesc->dst_nents = dst_nents;
1022 edesc->iv_dma = iv_dma;
1023 edesc->drv_req.app_ctx = req;
1024 edesc->drv_req.cbk = aead_done;
1025 edesc->drv_req.drv_ctx = drv_ctx;
/* assoclen is stored in CAAM (big-endian) byte order and DMA-mapped. */
1027 edesc->assoclen = cpu_to_caam32(req->assoclen);
1028 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
1030 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1031 dev_err(qidev, "unable to map assoclen\n");
1032 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1033 iv_dma, ivsize, op_type, 0, 0);
1034 qi_cache_free(edesc);
1035 return ERR_PTR(-ENOMEM);
/* Populate the link table: assoclen entry, optional IV, src, then dst. */
1038 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
1041 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
1044 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
1045 qm_sg_index += mapped_src_nents;
1047 if (mapped_dst_nents > 1)
1048 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1051 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1052 if (dma_mapping_error(qidev, qm_sg_dma)) {
1053 dev_err(qidev, "unable to map S/G table\n");
1054 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1055 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1056 iv_dma, ivsize, op_type, 0, 0);
1057 qi_cache_free(edesc);
1058 return ERR_PTR(-ENOMEM);
1061 edesc->qm_sg_dma = qm_sg_dma;
1062 edesc->qm_sg_bytes = qm_sg_bytes;
/* Output length: ciphertext grows by ICV on encrypt, shrinks on decrypt. */
1064 out_len = req->assoclen + req->cryptlen +
1065 (encrypt ? ctx->authsize : (-ctx->authsize));
1066 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
/* fd_sgt[1] = input (ext S/G), fd_sgt[0] = output. */
1068 fd_sgt = &edesc->drv_req.fd_sgt[0];
1069 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
1071 if (req->dst == req->src) {
1072 if (mapped_src_nents == 1)
1073 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
/* In-place multi-segment output starts past the assoclen[+IV] entries. */
1076 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
1077 (1 + !!ivsize) * sizeof(*sg_table),
1079 } else if (mapped_dst_nents == 1) {
1080 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
1083 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
1084 qm_sg_index, out_len, 0);
/*
 * aead_crypt() - common AEAD entry point: back off when the QI backend is
 * congested, allocate/map the extended descriptor, then enqueue the job.
 * On enqueue failure, unmap and free the edesc here.
 */
1090 static inline int aead_crypt(struct aead_request *req, bool encrypt)
1092 struct aead_edesc *edesc;
1093 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1094 struct caam_ctx *ctx = crypto_aead_ctx(aead);
/* Congestion: tell the caller to retry (presumably -EAGAIN — line dropped). */
1097 if (unlikely(caam_congested))
1100 /* allocate extended descriptor */
1101 edesc = aead_edesc_alloc(req, encrypt);
1102 if (IS_ERR_OR_NULL(edesc))
1103 return PTR_ERR(edesc);
1105 /* Create and submit job descriptor */
1106 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1110 aead_unmap(ctx->qidev, edesc, req);
1111 qi_cache_free(edesc);
/* Thin encrypt/decrypt wrappers around aead_crypt(). */
1117 static int aead_encrypt(struct aead_request *req)
1119 return aead_crypt(req, true);
1122 static int aead_decrypt(struct aead_request *req)
1124 return aead_crypt(req, false);
/*
 * IPsec GCM variants: RFC4106/4543 require at least 8 bytes of AAD
 * (SPI + sequence number), so reject shorter assoclen up front.
 */
1127 static int ipsec_gcm_encrypt(struct aead_request *req)
1129 if (req->assoclen < 8)
1132 return aead_crypt(req, true);
1135 static int ipsec_gcm_decrypt(struct aead_request *req)
1137 if (req->assoclen < 8)
1140 return aead_crypt(req, false);
/*
 * ablkcipher_done() - QI completion callback for (giv)ablkcipher jobs:
 * decode status, unmap DMA, copy out the generated IV for givencrypt,
 * propagate the last ciphertext block into req->info (CTS-style chaining),
 * then complete the request and free the edesc.
 */
1143 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1145 struct ablkcipher_edesc *edesc;
1146 struct ablkcipher_request *req = drv_req->app_ctx;
1147 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1148 struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
1149 struct device *qidev = caam_ctx->qidev;
1150 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1153 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1156 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1159 caam_jr_strstatus(qidev, status);
1162 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
1163 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1164 edesc->src_nents > 1 ? 100 : ivsize, 1);
1165 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1166 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1167 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1170 ablkcipher_unmap(qidev, edesc, req);
1172 /* In case initial IV was generated, copy it in GIVCIPHER request */
1173 if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1175 struct skcipher_givcrypt_request *greq;
1177 greq = container_of(req, struct skcipher_givcrypt_request,
/* The HW-written IV sits after the S/G table in the edesc allocation. */
1179 iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1180 memcpy(greq->giv, iv, ivsize);
1184 * The crypto API expects us to set the IV (req->info) to the last
1185 * ciphertext block. This is used e.g. by the CTS mode.
1187 if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
1188 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1191 qi_cache_free(edesc);
1192 ablkcipher_request_complete(req, status);
/*
 * ablkcipher_edesc_alloc() - build the s/w-extended descriptor for one
 * ablkcipher request: count/map scatterlists (bidirectional when
 * in-place), stage the IV after the S/G table, populate the link table
 * [IV, src, (dst)] and fill the frame-descriptor S/G entries.
 * Returns ERR_PTR() on failure, unwinding earlier mappings.
 * NOTE(review): extraction dropped interior lines; kept as visible.
 */
1195 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1198 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1199 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1200 struct device *qidev = ctx->qidev;
1201 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1202 GFP_KERNEL : GFP_ATOMIC;
1203 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1204 struct ablkcipher_edesc *edesc;
1207 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1208 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1209 struct qm_sg_entry *sg_table, *fd_sgt;
1210 struct caam_drv_ctx *drv_ctx;
1211 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
1213 drv_ctx = get_drv_ctx(ctx, op_type);
1214 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1215 return (struct ablkcipher_edesc *)drv_ctx;
1217 src_nents = sg_nents_for_len(req->src, req->nbytes);
1218 if (unlikely(src_nents < 0)) {
1219 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1221 return ERR_PTR(src_nents);
/* Out-of-place: validate dst, then map src TO_DEVICE and dst FROM_DEVICE. */
1224 if (unlikely(req->src != req->dst)) {
1225 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1226 if (unlikely(dst_nents < 0)) {
1227 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1229 return ERR_PTR(dst_nents);
1232 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1234 if (unlikely(!mapped_src_nents)) {
1235 dev_err(qidev, "unable to map source\n");
1236 return ERR_PTR(-ENOMEM);
1239 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1241 if (unlikely(!mapped_dst_nents)) {
1242 dev_err(qidev, "unable to map destination\n");
1243 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1244 return ERR_PTR(-ENOMEM);
/* In-place: one bidirectional mapping covers both input and output. */
1247 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1249 if (unlikely(!mapped_src_nents)) {
1250 dev_err(qidev, "unable to map source\n");
1251 return ERR_PTR(-ENOMEM);
/* Link table layout: [IV][src entries][optional dst entries]. */
1255 qm_sg_ents = 1 + mapped_src_nents;
1256 dst_sg_idx = qm_sg_ents;
1258 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1259 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
/* edesc + S/G table + trailing IV must fit one qi_cache slab entry. */
1260 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1261 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1262 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1263 qm_sg_ents, ivsize);
1264 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1266 return ERR_PTR(-ENOMEM);
1269 /* allocate space for base edesc, link tables and IV */
1270 edesc = qi_cache_alloc(GFP_DMA | flags);
1271 if (unlikely(!edesc)) {
1272 dev_err(qidev, "could not allocate extended descriptor\n");
1273 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1275 return ERR_PTR(-ENOMEM);
1278 /* Make sure IV is located in a DMAable area */
1279 sg_table = &edesc->sgt[0];
1280 iv = (u8 *)(sg_table + qm_sg_ents);
1281 memcpy(iv, req->info, ivsize);
1283 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1284 if (dma_mapping_error(qidev, iv_dma)) {
1285 dev_err(qidev, "unable to map IV\n");
1286 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1288 qi_cache_free(edesc);
1289 return ERR_PTR(-ENOMEM);
1292 edesc->src_nents = src_nents;
1293 edesc->dst_nents = dst_nents;
1294 edesc->iv_dma = iv_dma;
1295 edesc->qm_sg_bytes = qm_sg_bytes;
1296 edesc->drv_req.app_ctx = req;
1297 edesc->drv_req.cbk = ablkcipher_done;
1298 edesc->drv_req.drv_ctx = drv_ctx;
1300 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1301 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1303 if (mapped_dst_nents > 1)
1304 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1307 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1309 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1310 dev_err(qidev, "unable to map S/G table\n");
1311 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1312 iv_dma, ivsize, op_type, 0, 0);
1313 qi_cache_free(edesc);
1314 return ERR_PTR(-ENOMEM);
/* fd_sgt[1] = input (IV + payload via ext S/G), fd_sgt[0] = output. */
1317 fd_sgt = &edesc->drv_req.fd_sgt[0];
1319 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1320 ivsize + req->nbytes, 0);
1322 if (req->src == req->dst) {
/* In-place output: skip the IV entry at the start of the link table. */
1323 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1324 sizeof(*sg_table), req->nbytes, 0);
1325 } else if (mapped_dst_nents > 1) {
1326 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1327 sizeof(*sg_table), req->nbytes, 0);
1329 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
/*
 * ablkcipher_giv_edesc_alloc - allocate and DMA-map an extended descriptor
 * for an IV-generating (givencrypt) ablkcipher request.
 *
 * Maps the source/destination scatterlists, reserves a DMA slot for the IV
 * that the CAAM engine will *write* (hence DMA_FROM_DEVICE), builds the
 * QMan S/G table and the frame-descriptor S/G pair, and returns the edesc.
 * Returns ERR_PTR(-ENOMEM) or the sg_nents_for_len() error on failure;
 * mappings taken so far are released on each visible error path.
 *
 * NOTE(review): the extraction appears to have dropped lines in this block
 * (e.g. the NULL check following qi_cache_alloc() at 1414); comments below
 * describe only what the visible lines do.
 */
1336 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1337 struct skcipher_givcrypt_request *creq)
1339 struct ablkcipher_request *req = &creq->creq;
1340 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1341 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1342 struct device *qidev = ctx->qidev;
/* Sleepable allocation only when the caller permits it. */
1343 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1344 GFP_KERNEL : GFP_ATOMIC;
1345 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1346 struct ablkcipher_edesc *edesc;
1349 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1350 struct qm_sg_entry *sg_table, *fd_sgt;
1351 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1352 struct caam_drv_ctx *drv_ctx;
/* Shared-descriptor context for the GIVENCRYPT operation. */
1354 drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1355 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1356 return (struct ablkcipher_edesc *)drv_ctx;
1358 src_nents = sg_nents_for_len(req->src, req->nbytes);
1359 if (unlikely(src_nents < 0)) {
1360 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1362 return ERR_PTR(src_nents);
/* Out-of-place: map src and dst separately (different DMA directions). */
1365 if (unlikely(req->src != req->dst)) {
1366 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1367 if (unlikely(dst_nents < 0)) {
1368 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1370 return ERR_PTR(dst_nents);
1373 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1375 if (unlikely(!mapped_src_nents)) {
1376 dev_err(qidev, "unable to map source\n");
1377 return ERR_PTR(-ENOMEM);
1380 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1382 if (unlikely(!mapped_dst_nents)) {
1383 dev_err(qidev, "unable to map destination\n");
1384 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1385 return ERR_PTR(-ENOMEM);
/* In-place: a single mapping covers both src and dst. */
1388 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1390 if (unlikely(!mapped_src_nents)) {
1391 dev_err(qidev, "unable to map source\n");
1392 return ERR_PTR(-ENOMEM);
1395 dst_nents = src_nents;
1396 mapped_src_nents = src_nents;
/*
 * ablkcipher_crypt - common encrypt/decrypt path.
 *
 * Builds an extended descriptor via ablkcipher_edesc_alloc(), saves the last
 * ciphertext block into req->info (the crypto API's expected IV for chaining
 * the next request), then enqueues the job on the QI backend.  On enqueue
 * failure the visible code unmaps the edesc and frees it back to the cache.
 *
 * NOTE(review): the IV copy from req->src happens *before* completion; this
 * assumes the relevant block is already ciphertext at this point — the
 * decrypt ordering here is worth confirming against the upstream driver.
 */
1474 static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1476 struct ablkcipher_edesc *edesc;
1477 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1478 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1479 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
/* Back-pressure: bail out early when the QI interface is congested. */
1482 if (unlikely(caam_congested))
1485 /* allocate extended descriptor */
1486 edesc = ablkcipher_edesc_alloc(req, encrypt);
1488 return PTR_ERR(edesc);
1491 * The crypto API expects us to set the IV (req->info) to the last
1495 scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1498 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
/* Enqueue failed: undo all DMA mappings and release the descriptor. */
1502 ablkcipher_unmap(ctx->qidev, edesc, req);
1503 qi_cache_free(edesc);
/* Encrypt entry point: thin wrapper over the common crypt path. */
1509 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1511 return ablkcipher_crypt(req, true);
/* Decrypt entry point: thin wrapper over the common crypt path. */
1514 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1516 return ablkcipher_crypt(req, false);
/*
 * ablkcipher_givencrypt - encrypt with hardware-generated IV.
 *
 * Same flow as ablkcipher_crypt() but uses ablkcipher_giv_edesc_alloc(),
 * which arranges for the CAAM engine to produce the IV (DMA_FROM_DEVICE
 * mapping).  On enqueue failure the edesc is unmapped and freed.
 */
1519 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1521 struct ablkcipher_request *req = &creq->creq;
1522 struct ablkcipher_edesc *edesc;
1523 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1524 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
/* Bail out early under QI congestion. */
1527 if (unlikely(caam_congested))
1530 /* allocate extended descriptor */
1531 edesc = ablkcipher_giv_edesc_alloc(creq);
1533 return PTR_ERR(edesc);
1535 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
/* Enqueue failed: release DMA mappings and the cached descriptor. */
1539 ablkcipher_unmap(ctx->qidev, edesc, req);
1540 qi_cache_free(edesc);
/*
 * Template describing one (abl)kcipher algorithm this driver can register:
 * crypto API names, block size, the ablkcipher callbacks, and the CAAM
 * class-1/class-2 OP_ALG type words used to build shared descriptors.
 */
1546 #define template_ablkcipher template_u.ablkcipher
1547 struct caam_alg_template {
1548 char name[CRYPTO_MAX_ALG_NAME];
1549 char driver_name[CRYPTO_MAX_ALG_NAME];
1550 unsigned int blocksize;
1553 struct ablkcipher_alg ablkcipher;
/* CAAM descriptor header algorithm-selection words. */
1555 u32 class1_alg_type;
1556 u32 class2_alg_type;
/*
 * Table of (abl)kcipher algorithms registered by this driver:
 * AES/3DES/DES in CBC mode (as GIVCIPHER, with built-in IV generation),
 * AES-CTR, RFC3686 CTR(AES), and XTS-AES.
 */
1559 static struct caam_alg_template driver_algs[] = {
1560 /* ablkcipher descriptor */
1563 .driver_name = "cbc-aes-caam-qi",
1564 .blocksize = AES_BLOCK_SIZE,
1565 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1566 .template_ablkcipher = {
1567 .setkey = ablkcipher_setkey,
1568 .encrypt = ablkcipher_encrypt,
1569 .decrypt = ablkcipher_decrypt,
1570 .givencrypt = ablkcipher_givencrypt,
1571 .geniv = "<built-in>",
1572 .min_keysize = AES_MIN_KEY_SIZE,
1573 .max_keysize = AES_MAX_KEY_SIZE,
1574 .ivsize = AES_BLOCK_SIZE,
1576 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1579 .name = "cbc(des3_ede)",
1580 .driver_name = "cbc-3des-caam-qi",
1581 .blocksize = DES3_EDE_BLOCK_SIZE,
1582 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1583 .template_ablkcipher = {
1584 .setkey = ablkcipher_setkey,
1585 .encrypt = ablkcipher_encrypt,
1586 .decrypt = ablkcipher_decrypt,
1587 .givencrypt = ablkcipher_givencrypt,
1588 .geniv = "<built-in>",
1589 .min_keysize = DES3_EDE_KEY_SIZE,
1590 .max_keysize = DES3_EDE_KEY_SIZE,
1591 .ivsize = DES3_EDE_BLOCK_SIZE,
1593 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1597 .driver_name = "cbc-des-caam-qi",
1598 .blocksize = DES_BLOCK_SIZE,
1599 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1600 .template_ablkcipher = {
1601 .setkey = ablkcipher_setkey,
1602 .encrypt = ablkcipher_encrypt,
1603 .decrypt = ablkcipher_decrypt,
1604 .givencrypt = ablkcipher_givencrypt,
1605 .geniv = "<built-in>",
1606 .min_keysize = DES_KEY_SIZE,
1607 .max_keysize = DES_KEY_SIZE,
1608 .ivsize = DES_BLOCK_SIZE,
1610 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
/* CTR mode: no IV generation needed, plain ABLKCIPHER type. */
1614 .driver_name = "ctr-aes-caam-qi",
1616 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1617 .template_ablkcipher = {
1618 .setkey = ablkcipher_setkey,
1619 .encrypt = ablkcipher_encrypt,
1620 .decrypt = ablkcipher_decrypt,
1622 .min_keysize = AES_MIN_KEY_SIZE,
1623 .max_keysize = AES_MAX_KEY_SIZE,
1624 .ivsize = AES_BLOCK_SIZE,
1626 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1629 .name = "rfc3686(ctr(aes))",
1630 .driver_name = "rfc3686-ctr-aes-caam-qi",
1632 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1633 .template_ablkcipher = {
1634 .setkey = ablkcipher_setkey,
1635 .encrypt = ablkcipher_encrypt,
1636 .decrypt = ablkcipher_decrypt,
1637 .givencrypt = ablkcipher_givencrypt,
1638 .geniv = "<built-in>",
/* RFC3686 key material includes a trailing 4-byte nonce. */
1639 .min_keysize = AES_MIN_KEY_SIZE +
1640 CTR_RFC3686_NONCE_SIZE,
1641 .max_keysize = AES_MAX_KEY_SIZE +
1642 CTR_RFC3686_NONCE_SIZE,
1643 .ivsize = CTR_RFC3686_IV_SIZE,
1645 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
/* XTS uses two AES keys, hence the doubled key sizes. */
1649 .driver_name = "xts-aes-caam-qi",
1650 .blocksize = AES_BLOCK_SIZE,
1651 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1652 .template_ablkcipher = {
1653 .setkey = xts_ablkcipher_setkey,
1654 .encrypt = ablkcipher_encrypt,
1655 .decrypt = ablkcipher_decrypt,
1657 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1658 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1659 .ivsize = AES_BLOCK_SIZE,
1661 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
/*
 * Table of AEAD algorithms registered by this driver: GCM variants
 * (rfc4106, rfc4543, plain gcm) followed by authenc(hmac(X),cbc(Y))
 * combinations, each in plain and echainiv-wrapped form, for
 * Y in {AES, 3DES, DES} and X in {md5, sha1, sha224, sha256, sha384,
 * sha512}.  class1/class2_alg_type select cipher and auth engines.
 */
1665 static struct caam_aead_alg driver_aeads[] = {
1669 .cra_name = "rfc4106(gcm(aes))",
1670 .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
1673 .setkey = rfc4106_setkey,
1674 .setauthsize = rfc4106_setauthsize,
1675 .encrypt = ipsec_gcm_encrypt,
1676 .decrypt = ipsec_gcm_decrypt,
1678 .maxauthsize = AES_BLOCK_SIZE,
1681 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1687 .cra_name = "rfc4543(gcm(aes))",
1688 .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
1691 .setkey = rfc4543_setkey,
1692 .setauthsize = rfc4543_setauthsize,
1693 .encrypt = ipsec_gcm_encrypt,
1694 .decrypt = ipsec_gcm_decrypt,
1696 .maxauthsize = AES_BLOCK_SIZE,
1699 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1702 /* Galois Counter Mode */
1706 .cra_name = "gcm(aes)",
1707 .cra_driver_name = "gcm-aes-caam-qi",
1710 .setkey = gcm_setkey,
1711 .setauthsize = gcm_setauthsize,
1712 .encrypt = aead_encrypt,
1713 .decrypt = aead_decrypt,
1715 .maxauthsize = AES_BLOCK_SIZE,
1718 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1721 /* single-pass ipsec_esp descriptor */
/* --- authenc(hmac(*), cbc(aes)) group --- */
1725 .cra_name = "authenc(hmac(md5),cbc(aes))",
1726 .cra_driver_name = "authenc-hmac-md5-"
1728 .cra_blocksize = AES_BLOCK_SIZE,
1730 .setkey = aead_setkey,
1731 .setauthsize = aead_setauthsize,
1732 .encrypt = aead_encrypt,
1733 .decrypt = aead_decrypt,
1734 .ivsize = AES_BLOCK_SIZE,
1735 .maxauthsize = MD5_DIGEST_SIZE,
1738 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1739 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1740 OP_ALG_AAI_HMAC_PRECOMP,
1746 .cra_name = "echainiv(authenc(hmac(md5),"
1748 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1750 .cra_blocksize = AES_BLOCK_SIZE,
1752 .setkey = aead_setkey,
1753 .setauthsize = aead_setauthsize,
1754 .encrypt = aead_encrypt,
1755 .decrypt = aead_decrypt,
1756 .ivsize = AES_BLOCK_SIZE,
1757 .maxauthsize = MD5_DIGEST_SIZE,
1760 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1761 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1762 OP_ALG_AAI_HMAC_PRECOMP,
1769 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1770 .cra_driver_name = "authenc-hmac-sha1-"
1772 .cra_blocksize = AES_BLOCK_SIZE,
1774 .setkey = aead_setkey,
1775 .setauthsize = aead_setauthsize,
1776 .encrypt = aead_encrypt,
1777 .decrypt = aead_decrypt,
1778 .ivsize = AES_BLOCK_SIZE,
1779 .maxauthsize = SHA1_DIGEST_SIZE,
1782 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1783 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1784 OP_ALG_AAI_HMAC_PRECOMP,
1790 .cra_name = "echainiv(authenc(hmac(sha1),"
1792 .cra_driver_name = "echainiv-authenc-"
1793 "hmac-sha1-cbc-aes-caam-qi",
1794 .cra_blocksize = AES_BLOCK_SIZE,
1796 .setkey = aead_setkey,
1797 .setauthsize = aead_setauthsize,
1798 .encrypt = aead_encrypt,
1799 .decrypt = aead_decrypt,
1800 .ivsize = AES_BLOCK_SIZE,
1801 .maxauthsize = SHA1_DIGEST_SIZE,
1804 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1805 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1806 OP_ALG_AAI_HMAC_PRECOMP,
1813 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1814 .cra_driver_name = "authenc-hmac-sha224-"
1816 .cra_blocksize = AES_BLOCK_SIZE,
1818 .setkey = aead_setkey,
1819 .setauthsize = aead_setauthsize,
1820 .encrypt = aead_encrypt,
1821 .decrypt = aead_decrypt,
1822 .ivsize = AES_BLOCK_SIZE,
1823 .maxauthsize = SHA224_DIGEST_SIZE,
1826 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1827 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1828 OP_ALG_AAI_HMAC_PRECOMP,
1834 .cra_name = "echainiv(authenc(hmac(sha224),"
1836 .cra_driver_name = "echainiv-authenc-"
1837 "hmac-sha224-cbc-aes-caam-qi",
1838 .cra_blocksize = AES_BLOCK_SIZE,
1840 .setkey = aead_setkey,
1841 .setauthsize = aead_setauthsize,
1842 .encrypt = aead_encrypt,
1843 .decrypt = aead_decrypt,
1844 .ivsize = AES_BLOCK_SIZE,
1845 .maxauthsize = SHA224_DIGEST_SIZE,
1848 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1849 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1850 OP_ALG_AAI_HMAC_PRECOMP,
1857 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1858 .cra_driver_name = "authenc-hmac-sha256-"
1860 .cra_blocksize = AES_BLOCK_SIZE,
1862 .setkey = aead_setkey,
1863 .setauthsize = aead_setauthsize,
1864 .encrypt = aead_encrypt,
1865 .decrypt = aead_decrypt,
1866 .ivsize = AES_BLOCK_SIZE,
1867 .maxauthsize = SHA256_DIGEST_SIZE,
1870 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1871 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1872 OP_ALG_AAI_HMAC_PRECOMP,
1878 .cra_name = "echainiv(authenc(hmac(sha256),"
1880 .cra_driver_name = "echainiv-authenc-"
1881 "hmac-sha256-cbc-aes-"
1883 .cra_blocksize = AES_BLOCK_SIZE,
1885 .setkey = aead_setkey,
1886 .setauthsize = aead_setauthsize,
1887 .encrypt = aead_encrypt,
1888 .decrypt = aead_decrypt,
1889 .ivsize = AES_BLOCK_SIZE,
1890 .maxauthsize = SHA256_DIGEST_SIZE,
1893 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1894 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1895 OP_ALG_AAI_HMAC_PRECOMP,
1902 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1903 .cra_driver_name = "authenc-hmac-sha384-"
1905 .cra_blocksize = AES_BLOCK_SIZE,
1907 .setkey = aead_setkey,
1908 .setauthsize = aead_setauthsize,
1909 .encrypt = aead_encrypt,
1910 .decrypt = aead_decrypt,
1911 .ivsize = AES_BLOCK_SIZE,
1912 .maxauthsize = SHA384_DIGEST_SIZE,
1915 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1916 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1917 OP_ALG_AAI_HMAC_PRECOMP,
1923 .cra_name = "echainiv(authenc(hmac(sha384),"
1925 .cra_driver_name = "echainiv-authenc-"
1926 "hmac-sha384-cbc-aes-"
1928 .cra_blocksize = AES_BLOCK_SIZE,
1930 .setkey = aead_setkey,
1931 .setauthsize = aead_setauthsize,
1932 .encrypt = aead_encrypt,
1933 .decrypt = aead_decrypt,
1934 .ivsize = AES_BLOCK_SIZE,
1935 .maxauthsize = SHA384_DIGEST_SIZE,
1938 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1939 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1940 OP_ALG_AAI_HMAC_PRECOMP,
1947 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1948 .cra_driver_name = "authenc-hmac-sha512-"
1950 .cra_blocksize = AES_BLOCK_SIZE,
1952 .setkey = aead_setkey,
1953 .setauthsize = aead_setauthsize,
1954 .encrypt = aead_encrypt,
1955 .decrypt = aead_decrypt,
1956 .ivsize = AES_BLOCK_SIZE,
1957 .maxauthsize = SHA512_DIGEST_SIZE,
1960 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1961 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1962 OP_ALG_AAI_HMAC_PRECOMP,
1968 .cra_name = "echainiv(authenc(hmac(sha512),"
1970 .cra_driver_name = "echainiv-authenc-"
1971 "hmac-sha512-cbc-aes-"
1973 .cra_blocksize = AES_BLOCK_SIZE,
1975 .setkey = aead_setkey,
1976 .setauthsize = aead_setauthsize,
1977 .encrypt = aead_encrypt,
1978 .decrypt = aead_decrypt,
1979 .ivsize = AES_BLOCK_SIZE,
1980 .maxauthsize = SHA512_DIGEST_SIZE,
1983 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1984 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1985 OP_ALG_AAI_HMAC_PRECOMP,
/* --- authenc(hmac(*), cbc(des3_ede)) group --- */
1992 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1993 .cra_driver_name = "authenc-hmac-md5-"
1994 "cbc-des3_ede-caam-qi",
1995 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1997 .setkey = aead_setkey,
1998 .setauthsize = aead_setauthsize,
1999 .encrypt = aead_encrypt,
2000 .decrypt = aead_decrypt,
2001 .ivsize = DES3_EDE_BLOCK_SIZE,
2002 .maxauthsize = MD5_DIGEST_SIZE,
2005 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2006 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2007 OP_ALG_AAI_HMAC_PRECOMP,
2013 .cra_name = "echainiv(authenc(hmac(md5),"
2015 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2016 "cbc-des3_ede-caam-qi",
2017 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2019 .setkey = aead_setkey,
2020 .setauthsize = aead_setauthsize,
2021 .encrypt = aead_encrypt,
2022 .decrypt = aead_decrypt,
2023 .ivsize = DES3_EDE_BLOCK_SIZE,
2024 .maxauthsize = MD5_DIGEST_SIZE,
2027 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2028 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2029 OP_ALG_AAI_HMAC_PRECOMP,
2036 .cra_name = "authenc(hmac(sha1),"
2038 .cra_driver_name = "authenc-hmac-sha1-"
2039 "cbc-des3_ede-caam-qi",
2040 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2042 .setkey = aead_setkey,
2043 .setauthsize = aead_setauthsize,
2044 .encrypt = aead_encrypt,
2045 .decrypt = aead_decrypt,
2046 .ivsize = DES3_EDE_BLOCK_SIZE,
2047 .maxauthsize = SHA1_DIGEST_SIZE,
2050 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2051 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2052 OP_ALG_AAI_HMAC_PRECOMP,
2058 .cra_name = "echainiv(authenc(hmac(sha1),"
2060 .cra_driver_name = "echainiv-authenc-"
2062 "cbc-des3_ede-caam-qi",
2063 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2065 .setkey = aead_setkey,
2066 .setauthsize = aead_setauthsize,
2067 .encrypt = aead_encrypt,
2068 .decrypt = aead_decrypt,
2069 .ivsize = DES3_EDE_BLOCK_SIZE,
2070 .maxauthsize = SHA1_DIGEST_SIZE,
2073 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2074 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2075 OP_ALG_AAI_HMAC_PRECOMP,
2082 .cra_name = "authenc(hmac(sha224),"
2084 .cra_driver_name = "authenc-hmac-sha224-"
2085 "cbc-des3_ede-caam-qi",
2086 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2088 .setkey = aead_setkey,
2089 .setauthsize = aead_setauthsize,
2090 .encrypt = aead_encrypt,
2091 .decrypt = aead_decrypt,
2092 .ivsize = DES3_EDE_BLOCK_SIZE,
2093 .maxauthsize = SHA224_DIGEST_SIZE,
2096 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2097 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2098 OP_ALG_AAI_HMAC_PRECOMP,
2104 .cra_name = "echainiv(authenc(hmac(sha224),"
2106 .cra_driver_name = "echainiv-authenc-"
2108 "cbc-des3_ede-caam-qi",
2109 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2111 .setkey = aead_setkey,
2112 .setauthsize = aead_setauthsize,
2113 .encrypt = aead_encrypt,
2114 .decrypt = aead_decrypt,
2115 .ivsize = DES3_EDE_BLOCK_SIZE,
2116 .maxauthsize = SHA224_DIGEST_SIZE,
2119 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2120 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2121 OP_ALG_AAI_HMAC_PRECOMP,
2128 .cra_name = "authenc(hmac(sha256),"
2130 .cra_driver_name = "authenc-hmac-sha256-"
2131 "cbc-des3_ede-caam-qi",
2132 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2134 .setkey = aead_setkey,
2135 .setauthsize = aead_setauthsize,
2136 .encrypt = aead_encrypt,
2137 .decrypt = aead_decrypt,
2138 .ivsize = DES3_EDE_BLOCK_SIZE,
2139 .maxauthsize = SHA256_DIGEST_SIZE,
2142 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2143 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2144 OP_ALG_AAI_HMAC_PRECOMP,
2150 .cra_name = "echainiv(authenc(hmac(sha256),"
2152 .cra_driver_name = "echainiv-authenc-"
2154 "cbc-des3_ede-caam-qi",
2155 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2157 .setkey = aead_setkey,
2158 .setauthsize = aead_setauthsize,
2159 .encrypt = aead_encrypt,
2160 .decrypt = aead_decrypt,
2161 .ivsize = DES3_EDE_BLOCK_SIZE,
2162 .maxauthsize = SHA256_DIGEST_SIZE,
2165 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2166 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2167 OP_ALG_AAI_HMAC_PRECOMP,
2174 .cra_name = "authenc(hmac(sha384),"
2176 .cra_driver_name = "authenc-hmac-sha384-"
2177 "cbc-des3_ede-caam-qi",
2178 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2180 .setkey = aead_setkey,
2181 .setauthsize = aead_setauthsize,
2182 .encrypt = aead_encrypt,
2183 .decrypt = aead_decrypt,
2184 .ivsize = DES3_EDE_BLOCK_SIZE,
2185 .maxauthsize = SHA384_DIGEST_SIZE,
2188 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2189 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2190 OP_ALG_AAI_HMAC_PRECOMP,
2196 .cra_name = "echainiv(authenc(hmac(sha384),"
2198 .cra_driver_name = "echainiv-authenc-"
2200 "cbc-des3_ede-caam-qi",
2201 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2203 .setkey = aead_setkey,
2204 .setauthsize = aead_setauthsize,
2205 .encrypt = aead_encrypt,
2206 .decrypt = aead_decrypt,
2207 .ivsize = DES3_EDE_BLOCK_SIZE,
2208 .maxauthsize = SHA384_DIGEST_SIZE,
2211 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2212 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2213 OP_ALG_AAI_HMAC_PRECOMP,
2220 .cra_name = "authenc(hmac(sha512),"
2222 .cra_driver_name = "authenc-hmac-sha512-"
2223 "cbc-des3_ede-caam-qi",
2224 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2226 .setkey = aead_setkey,
2227 .setauthsize = aead_setauthsize,
2228 .encrypt = aead_encrypt,
2229 .decrypt = aead_decrypt,
2230 .ivsize = DES3_EDE_BLOCK_SIZE,
2231 .maxauthsize = SHA512_DIGEST_SIZE,
2234 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2235 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2236 OP_ALG_AAI_HMAC_PRECOMP,
2242 .cra_name = "echainiv(authenc(hmac(sha512),"
2244 .cra_driver_name = "echainiv-authenc-"
2246 "cbc-des3_ede-caam-qi",
2247 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2249 .setkey = aead_setkey,
2250 .setauthsize = aead_setauthsize,
2251 .encrypt = aead_encrypt,
2252 .decrypt = aead_decrypt,
2253 .ivsize = DES3_EDE_BLOCK_SIZE,
2254 .maxauthsize = SHA512_DIGEST_SIZE,
2257 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2258 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2259 OP_ALG_AAI_HMAC_PRECOMP,
/* --- authenc(hmac(*), cbc(des)) group --- */
2266 .cra_name = "authenc(hmac(md5),cbc(des))",
2267 .cra_driver_name = "authenc-hmac-md5-"
2269 .cra_blocksize = DES_BLOCK_SIZE,
2271 .setkey = aead_setkey,
2272 .setauthsize = aead_setauthsize,
2273 .encrypt = aead_encrypt,
2274 .decrypt = aead_decrypt,
2275 .ivsize = DES_BLOCK_SIZE,
2276 .maxauthsize = MD5_DIGEST_SIZE,
2279 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2280 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2281 OP_ALG_AAI_HMAC_PRECOMP,
2287 .cra_name = "echainiv(authenc(hmac(md5),"
2289 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2291 .cra_blocksize = DES_BLOCK_SIZE,
2293 .setkey = aead_setkey,
2294 .setauthsize = aead_setauthsize,
2295 .encrypt = aead_encrypt,
2296 .decrypt = aead_decrypt,
2297 .ivsize = DES_BLOCK_SIZE,
2298 .maxauthsize = MD5_DIGEST_SIZE,
2301 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2302 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2303 OP_ALG_AAI_HMAC_PRECOMP,
2310 .cra_name = "authenc(hmac(sha1),cbc(des))",
2311 .cra_driver_name = "authenc-hmac-sha1-"
2313 .cra_blocksize = DES_BLOCK_SIZE,
2315 .setkey = aead_setkey,
2316 .setauthsize = aead_setauthsize,
2317 .encrypt = aead_encrypt,
2318 .decrypt = aead_decrypt,
2319 .ivsize = DES_BLOCK_SIZE,
2320 .maxauthsize = SHA1_DIGEST_SIZE,
2323 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2324 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2325 OP_ALG_AAI_HMAC_PRECOMP,
2331 .cra_name = "echainiv(authenc(hmac(sha1),"
2333 .cra_driver_name = "echainiv-authenc-"
2334 "hmac-sha1-cbc-des-caam-qi",
2335 .cra_blocksize = DES_BLOCK_SIZE,
2337 .setkey = aead_setkey,
2338 .setauthsize = aead_setauthsize,
2339 .encrypt = aead_encrypt,
2340 .decrypt = aead_decrypt,
2341 .ivsize = DES_BLOCK_SIZE,
2342 .maxauthsize = SHA1_DIGEST_SIZE,
2345 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2346 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2347 OP_ALG_AAI_HMAC_PRECOMP,
2354 .cra_name = "authenc(hmac(sha224),cbc(des))",
2355 .cra_driver_name = "authenc-hmac-sha224-"
2357 .cra_blocksize = DES_BLOCK_SIZE,
2359 .setkey = aead_setkey,
2360 .setauthsize = aead_setauthsize,
2361 .encrypt = aead_encrypt,
2362 .decrypt = aead_decrypt,
2363 .ivsize = DES_BLOCK_SIZE,
2364 .maxauthsize = SHA224_DIGEST_SIZE,
2367 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2368 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2369 OP_ALG_AAI_HMAC_PRECOMP,
2375 .cra_name = "echainiv(authenc(hmac(sha224),"
2377 .cra_driver_name = "echainiv-authenc-"
2378 "hmac-sha224-cbc-des-"
2380 .cra_blocksize = DES_BLOCK_SIZE,
2382 .setkey = aead_setkey,
2383 .setauthsize = aead_setauthsize,
2384 .encrypt = aead_encrypt,
2385 .decrypt = aead_decrypt,
2386 .ivsize = DES_BLOCK_SIZE,
2387 .maxauthsize = SHA224_DIGEST_SIZE,
2390 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2391 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2392 OP_ALG_AAI_HMAC_PRECOMP,
2399 .cra_name = "authenc(hmac(sha256),cbc(des))",
2400 .cra_driver_name = "authenc-hmac-sha256-"
2402 .cra_blocksize = DES_BLOCK_SIZE,
2404 .setkey = aead_setkey,
2405 .setauthsize = aead_setauthsize,
2406 .encrypt = aead_encrypt,
2407 .decrypt = aead_decrypt,
2408 .ivsize = DES_BLOCK_SIZE,
2409 .maxauthsize = SHA256_DIGEST_SIZE,
2412 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2413 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2414 OP_ALG_AAI_HMAC_PRECOMP,
2420 .cra_name = "echainiv(authenc(hmac(sha256),"
2422 .cra_driver_name = "echainiv-authenc-"
2423 "hmac-sha256-cbc-des-"
2425 .cra_blocksize = DES_BLOCK_SIZE,
2427 .setkey = aead_setkey,
2428 .setauthsize = aead_setauthsize,
2429 .encrypt = aead_encrypt,
2430 .decrypt = aead_decrypt,
2431 .ivsize = DES_BLOCK_SIZE,
2432 .maxauthsize = SHA256_DIGEST_SIZE,
2435 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2436 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2437 OP_ALG_AAI_HMAC_PRECOMP,
2444 .cra_name = "authenc(hmac(sha384),cbc(des))",
2445 .cra_driver_name = "authenc-hmac-sha384-"
2447 .cra_blocksize = DES_BLOCK_SIZE,
2449 .setkey = aead_setkey,
2450 .setauthsize = aead_setauthsize,
2451 .encrypt = aead_encrypt,
2452 .decrypt = aead_decrypt,
2453 .ivsize = DES_BLOCK_SIZE,
2454 .maxauthsize = SHA384_DIGEST_SIZE,
2457 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2458 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2459 OP_ALG_AAI_HMAC_PRECOMP,
2465 .cra_name = "echainiv(authenc(hmac(sha384),"
2467 .cra_driver_name = "echainiv-authenc-"
2468 "hmac-sha384-cbc-des-"
2470 .cra_blocksize = DES_BLOCK_SIZE,
2472 .setkey = aead_setkey,
2473 .setauthsize = aead_setauthsize,
2474 .encrypt = aead_encrypt,
2475 .decrypt = aead_decrypt,
2476 .ivsize = DES_BLOCK_SIZE,
2477 .maxauthsize = SHA384_DIGEST_SIZE,
2480 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2481 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2482 OP_ALG_AAI_HMAC_PRECOMP,
2489 .cra_name = "authenc(hmac(sha512),cbc(des))",
2490 .cra_driver_name = "authenc-hmac-sha512-"
2492 .cra_blocksize = DES_BLOCK_SIZE,
2494 .setkey = aead_setkey,
2495 .setauthsize = aead_setauthsize,
2496 .encrypt = aead_encrypt,
2497 .decrypt = aead_decrypt,
2498 .ivsize = DES_BLOCK_SIZE,
2499 .maxauthsize = SHA512_DIGEST_SIZE,
2502 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2503 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2504 OP_ALG_AAI_HMAC_PRECOMP,
2510 .cra_name = "echainiv(authenc(hmac(sha512),"
2512 .cra_driver_name = "echainiv-authenc-"
2513 "hmac-sha512-cbc-des-"
2515 .cra_blocksize = DES_BLOCK_SIZE,
2517 .setkey = aead_setkey,
2518 .setauthsize = aead_setauthsize,
2519 .encrypt = aead_encrypt,
2520 .decrypt = aead_decrypt,
2521 .ivsize = DES_BLOCK_SIZE,
2522 .maxauthsize = SHA512_DIGEST_SIZE,
2525 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2526 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2527 OP_ALG_AAI_HMAC_PRECOMP,
/*
 * Registered (abl)kcipher algorithm instance: lives on the module-level
 * alg_list and pairs the crypto_alg with its CAAM descriptor-type entry.
 */
2533 struct caam_crypto_alg {
2534 struct list_head entry;
2535 struct crypto_alg crypto_alg;
2536 struct caam_alg_entry caam;
/*
 * caam_init_common - shared tfm-init path for both ablkcipher and AEAD.
 *
 * Allocates a job ring for the tfm, picks the key DMA direction (CAAM era
 * >= 6 with DKP writes the key area back, so it must be bidirectional),
 * maps the in-context key buffer, copies the descriptor-header algorithm
 * words, and resets the per-operation driver contexts.
 * Returns 0 on success or a negative errno (job ring / DMA failure).
 */
2539 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2542 struct caam_drv_private *priv;
2545 * distribute tfms across job rings to ensure in-order
2546 * crypto request processing per tfm
2548 ctx->jrdev = caam_jr_alloc();
2549 if (IS_ERR(ctx->jrdev)) {
2550 pr_err("Job Ring Device allocation for transform failed\n");
2551 return PTR_ERR(ctx->jrdev);
2554 priv = dev_get_drvdata(ctx->jrdev->parent);
/* DKP on era >= 6 rewrites the key in place -> bidirectional mapping. */
2555 if (priv->era >= 6 && uses_dkp)
2556 ctx->dir = DMA_BIDIRECTIONAL;
2558 ctx->dir = DMA_TO_DEVICE;
2560 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2562 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2563 dev_err(ctx->jrdev, "unable to map key\n");
2564 caam_jr_free(ctx->jrdev);
2568 /* copy descriptor header template value */
2569 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2570 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2572 ctx->qidev = priv->qidev;
/* Driver contexts are created lazily; start with all slots empty. */
2574 spin_lock_init(&ctx->lock);
2575 ctx->drv_ctx[ENCRYPT] = NULL;
2576 ctx->drv_ctx[DECRYPT] = NULL;
2577 ctx->drv_ctx[GIVENCRYPT] = NULL;
/* tfm init for (abl)kcipher algorithms; never uses DKP (uses_dkp = false). */
2582 static int caam_cra_init(struct crypto_tfm *tfm)
2584 struct crypto_alg *alg = tfm->__crt_alg;
2585 struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2587 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2589 return caam_init_common(ctx, &caam_alg->caam, false);
/*
 * tfm init for AEAD algorithms.  Only authenc-style algorithms (those
 * using aead_setkey) take the DKP path, hence the setkey comparison.
 */
2592 static int caam_aead_init(struct crypto_aead *tfm)
2594 struct aead_alg *alg = crypto_aead_alg(tfm);
2595 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2597 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2599 return caam_init_common(ctx, &caam_alg->caam,
2600 alg->setkey == aead_setkey);
/*
 * caam_exit_common - undo caam_init_common(): release the per-operation
 * driver contexts, unmap the key buffer, and free the job ring.
 */
2603 static void caam_exit_common(struct caam_ctx *ctx)
2605 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2606 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2607 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2609 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2611 caam_jr_free(ctx->jrdev);
/* tfm exit for (abl)kcipher algorithms. */
2614 static void caam_cra_exit(struct crypto_tfm *tfm)
2616 caam_exit_common(crypto_tfm_ctx(tfm));
/* tfm exit for AEAD algorithms. */
2619 static void caam_aead_exit(struct crypto_aead *tfm)
2621 caam_exit_common(crypto_aead_ctx(tfm));
/* List of successfully registered (abl)kcipher algorithms. */
2624 static struct list_head alg_list;
/*
 * Module unload: unregister every AEAD that was registered, then walk
 * alg_list unregistering and freeing each (abl)kcipher entry.
 */
2625 static void __exit caam_qi_algapi_exit(void)
2627 struct caam_crypto_alg *t_alg, *n;
2630 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2631 struct caam_aead_alg *t_alg = driver_aeads + i;
2633 if (t_alg->registered)
2634 crypto_unregister_aead(&t_alg->aead);
2640 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2641 crypto_unregister_alg(&t_alg->crypto_alg);
2642 list_del(&t_alg->entry);
/*
 * caam_alg_alloc - build a crypto_alg from a driver_algs[] template.
 *
 * Allocates a caam_crypto_alg, fills in the crypto API fields (names,
 * priority, callbacks, blocksize) and selects the cra_type based on the
 * template type (GIVCIPHER vs plain ABLKCIPHER).  Caller owns the
 * returned object; returns ERR_PTR(-ENOMEM) on allocation failure.
 */
2647 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2650 struct caam_crypto_alg *t_alg;
2651 struct crypto_alg *alg;
2653 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2655 return ERR_PTR(-ENOMEM);
2657 alg = &t_alg->crypto_alg;
2659 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2660 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2661 template->driver_name);
2662 alg->cra_module = THIS_MODULE;
2663 alg->cra_init = caam_cra_init;
2664 alg->cra_exit = caam_cra_exit;
2665 alg->cra_priority = CAAM_CRA_PRIORITY;
2666 alg->cra_blocksize = template->blocksize;
2667 alg->cra_alignmask = 0;
2668 alg->cra_ctxsize = sizeof(struct caam_ctx);
2669 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
/* GIVCIPHER entries expose givencrypt; both share the ablkcipher union. */
2671 switch (template->type) {
2672 case CRYPTO_ALG_TYPE_GIVCIPHER:
2673 alg->cra_type = &crypto_givcipher_type;
2674 alg->cra_ablkcipher = template->template_ablkcipher;
2676 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2677 alg->cra_type = &crypto_ablkcipher_type;
2678 alg->cra_ablkcipher = template->template_ablkcipher;
2682 t_alg->caam.class1_alg_type = template->class1_alg_type;
2683 t_alg->caam.class2_alg_type = template->class2_alg_type;
2688 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2690 struct aead_alg *alg = &t_alg->aead;
2692 alg->base.cra_module = THIS_MODULE;
2693 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2694 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2695 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2697 alg->init = caam_aead_init;
2698 alg->exit = caam_aead_exit;
/*
 * Module init: locate the CAAM controller via the device tree, read its
 * capability registers, and register every algorithm in driver_algs[] /
 * driver_aeads[] that the hardware can actually support.
 *
 * Registration is best-effort per algorithm: a failure warns and skips the
 * entry rather than aborting the whole module load.  Returns 0 on success
 * (including "nothing registered"), -ENODEV if no suitable controller is
 * found, or the last per-algorithm error encountered.
 */
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	/* Two compatible strings exist in the wild for SEC v4.0 */
	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node); /* balance the refcount taken by the lookup */
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	/* This frontend targets DPAA 1.x only; DPAA 2.x has its own driver */
	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	/* Pass 1: dynamically-allocated (give)cipher algorithms */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg); /* we still own it on failure */
			continue;
		}

		/* Track for unregistration/free at module exit */
		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	/* Pass 2: statically-declared AEAD algorithms */
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices (e.g. GCM).
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		/* Mark so the exit path knows to unregister this entry */
		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}
2843 module_init(caam_qi_algapi_init);
2844 module_exit(caam_qi_algapi_exit);
2846 MODULE_LICENSE("GPL");
2847 MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2848 MODULE_AUTHOR("Freescale Semiconductor");