1 // SPDX-License-Identifier: GPL-2.0+
3 * caam - Freescale FSL CAAM support for ahash functions of crypto API
5 * Copyright 2011 Freescale Semiconductor, Inc.
8 * Based on caamalg.c crypto API driver.
10 * relationship of digest job descriptor or first job descriptor after init to
13 * --------------- ---------------
14 * | JobDesc #1 |-------------------->| ShareDesc |
15 * | *(packet 1) | | (hashKey) |
16 * --------------- | (operation) |
19 * relationship of subsequent job descriptors to shared descriptors:
21 * --------------- ---------------
22 * | JobDesc #2 |-------------------->| ShareDesc |
23 * | *(packet 2) | |------------->| (hashKey) |
24 * --------------- | |-------->| (operation) |
25 * . | | | (load ctx2) |
26 * . | | ---------------
28 * | JobDesc #3 |------| |
34 * | JobDesc #4 |------------
38 * The SharedDesc never changes for a connection unless rekeyed, but
39 * each packet will likely be in a different place. So all we need
40 * to know to process the packet is where the input is, where the
41 * output goes, and what context we want to process with. Context is
42 * in the SharedDesc, packet references in the JobDesc.
44 * So, a job desc looks like:
46 * ---------------------
48 * | ShareDesc Pointer |
55 * ---------------------
62 #include "desc_constr.h"
65 #include "sg_sw_sec4.h"
67 #include "caamhash_desc.h"
69 #define CAAM_CRA_PRIORITY 3000
71 /* max hash key is max split key size */
72 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
74 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
75 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
77 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
78 CAAM_MAX_HASH_KEY_SIZE)
79 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
81 /* caam context sizes for hashes: running digest + 8 */
82 #define HASH_MSG_LEN 8
83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
/* compiled out when DEBUG is not defined */
#define debug(format, arg...)
#endif
93 static struct list_head hash_list;
95 /* ahash per-session context */
/*
 * caam_hash_ctx - per-tfm (per-session) context.
 * NOTE(review): this paste is line-sampled — the closing brace and some
 * fields (e.g. ctx_len, used below) are missing from view; leading numbers
 * are paste artifacts, not code.
 */
96 struct caam_hash_ctx {
/* One shared descriptor per operation type; each is cacheline-aligned so
 * it can be DMA-synced to the device independently of its neighbours. */
97 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
98 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
99 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
100 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
/* DMA addresses of the shared descriptors above */
101 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
102 dma_addr_t sh_desc_update_first_dma;
103 dma_addr_t sh_desc_fin_dma;
104 dma_addr_t sh_desc_digest_dma;
/* DMA direction used when syncing the shared descriptors (see ctx->dir
 * uses in ahash_set_sh_desc()) */
105 enum dma_data_direction dir;
/* job ring device servicing this session */
106 struct device *jrdev;
/* (split) key material; bounded by CAAM_MAX_HASH_KEY_SIZE */
107 u8 key[CAAM_MAX_HASH_KEY_SIZE];
/* algorithm info (algtype, keylen, key_virt...) consumed by descriptor
 * construction helpers */
109 struct alginfo adata;
/*
 * caam_hash_state - per-request state.
 * NOTE(review): fields referenced elsewhere in this file (buf_dma, ctx_dma,
 * buflen_0, buflen_1, current_buf) are missing from this sampled view.
 */
113 struct caam_hash_state {
/* double-buffered staging areas for partial blocks; see current_buf()/
 * alt_buf() selectors below */
116 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
118 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
/* running hardware context: running digest + message length (MAX_CTX_LEN) */
120 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
/* per-state dispatch: swapped between *_ctx / *_no_ctx / *_first variants
 * as the request progresses (see ahash_init/ahash_update_first) */
121 int (*update)(struct ahash_request *req);
122 int (*final)(struct ahash_request *req);
123 int (*finup)(struct ahash_request *req);
/*
 * caam_export_state - serialized form of caam_hash_state for the crypto
 * API export/import operations (single buffer instead of the double
 * buffer in caam_hash_state).
 * NOTE(review): closing brace and any buflen field are outside this view.
 */
127 struct caam_export_state {
128 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
129 u8 caam_ctx[MAX_CTX_LEN];
131 int (*update)(struct ahash_request *req);
132 int (*final)(struct ahash_request *req);
133 int (*finup)(struct ahash_request *req);
136 static inline void switch_buf(struct caam_hash_state *state)
138 state->current_buf ^= 1;
141 static inline u8 *current_buf(struct caam_hash_state *state)
143 return state->current_buf ? state->buf_1 : state->buf_0;
146 static inline u8 *alt_buf(struct caam_hash_state *state)
148 return state->current_buf ? state->buf_0 : state->buf_1;
151 static inline int *current_buflen(struct caam_hash_state *state)
153 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
156 static inline int *alt_buflen(struct caam_hash_state *state)
158 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
161 /* Common job descriptor seq in/out ptr routines */
163 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
/*
 * On mapping failure logs and (in the original, outside this view) returns
 * an error; on success records state->ctx_dma and returns 0.
 * NOTE(review): the ctx_len parameter, error return and closing brace are
 * missing from this sampled paste.
 */
164 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
165 struct caam_hash_state *state,
/* device will write the updated running context back: DMA_FROM_DEVICE */
168 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
169 ctx_len, DMA_FROM_DEVICE);
170 if (dma_mapping_error(jrdev, state->ctx_dma)) {
171 dev_err(jrdev, "unable to map ctx\n");
176 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
181 /* Map req->result, and append seq_out_ptr command that points to it */
182 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
183 u8 *result, int digestsize)
187 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
188 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
193 /* Map current buffer in state (if length > 0) and put it in link table */
/*
 * NOTE(review): the zero-length early-out, DMA direction argument, error
 * return and closing brace are missing from this sampled paste.
 */
194 static inline int buf_map_to_sec4_sg(struct device *jrdev,
195 struct sec4_sg_entry *sec4_sg,
196 struct caam_hash_state *state)
198 int buflen = *current_buflen(state);
/* map the active staging buffer and record its handle for later unmap */
203 state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
205 if (dma_mapping_error(jrdev, state->buf_dma)) {
206 dev_err(jrdev, "unable to map buf\n");
/* add the mapped buffer as one link-table entry */
211 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
216 /* Map state->caam_ctx, and add it to link table */
/*
 * @flag selects the DMA direction (callers pass DMA_TO_DEVICE or
 * DMA_BIDIRECTIONAL depending on whether the job updates the context).
 * NOTE(review): error return and closing brace missing from this view.
 */
217 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
218 struct caam_hash_state *state, int ctx_len,
219 struct sec4_sg_entry *sec4_sg, u32 flag)
221 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
222 if (dma_mapping_error(jrdev, state->ctx_dma)) {
223 dev_err(jrdev, "unable to map ctx\n");
228 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
/*
 * (Re)build the four shared descriptors (update, update_first, final,
 * digest) for this tfm and sync each one to the device. Called from
 * ahash_setkey() and, in the full file, from cra_init paths.
 * NOTE(review): the u32 *desc local, #ifdef DEBUG guards around the
 * print_hex_dump calls, and the return statement are missing from this
 * sampled paste.
 */
233 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
235 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
236 int digestsize = crypto_ahash_digestsize(ahash);
237 struct device *jrdev = ctx->jrdev;
238 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
241 ctx->adata.key_virt = ctx->key;
243 /* ahash_update shared descriptor */
244 desc = ctx->sh_desc_update;
/* OP_ALG_AS_UPDATE: context in, context out -> have_key/import ctx true */
245 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
246 ctx->ctx_len, true, ctrlpriv->era);
247 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
248 desc_bytes(desc), ctx->dir);
250 print_hex_dump(KERN_ERR,
251 "ahash update shdesc@"__stringify(__LINE__)": ",
252 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
255 /* ahash_update_first shared descriptor */
256 desc = ctx->sh_desc_update_first;
/* OP_ALG_AS_INIT: no incoming context (false), emits ctx_len bytes */
257 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
258 ctx->ctx_len, false, ctrlpriv->era);
259 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
260 desc_bytes(desc), ctx->dir);
262 print_hex_dump(KERN_ERR,
263 "ahash update first shdesc@"__stringify(__LINE__)": ",
264 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
267 /* ahash_final shared descriptor */
268 desc = ctx->sh_desc_fin;
/* OP_ALG_AS_FINALIZE: consumes context, emits digestsize bytes */
269 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
270 ctx->ctx_len, true, ctrlpriv->era);
271 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
272 desc_bytes(desc), ctx->dir);
274 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
275 DUMP_PREFIX_ADDRESS, 16, 4, desc,
276 desc_bytes(desc), 1);
279 /* ahash_digest shared descriptor */
280 desc = ctx->sh_desc_digest;
/* OP_ALG_AS_INITFINAL: one-shot, no context in */
281 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
282 ctx->ctx_len, false, ctrlpriv->era);
283 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
284 desc_bytes(desc), ctx->dir);
286 print_hex_dump(KERN_ERR,
287 "ahash digest shdesc@"__stringify(__LINE__)": ",
288 DUMP_PREFIX_ADDRESS, 16, 4, desc,
289 desc_bytes(desc), 1);
295 /* Digest hash size if it is too large */
/*
 * Hash an over-long user key down to digestsize bytes using a one-shot
 * unkeyed hash job on the job ring, writing the result to key_out and
 * updating *keylen. Synchronous: blocks on a completion until the job
 * finishes.
 * NOTE(review): the u32 *desc / int ret declarations, allocation-failure
 * and enqueue-failure paths, result.err handling and kfree(desc) are
 * missing from this sampled paste.
 */
296 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
297 u32 *keylen, u8 *key_out, u32 digestsize)
299 struct device *jrdev = ctx->jrdev;
301 struct split_key_result result;
302 dma_addr_t src_dma, dst_dma;
/* job descriptor built on the fly; GFP_DMA so the h/w can reach it */
305 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
307 dev_err(jrdev, "unable to allocate key input memory\n");
311 init_job_desc(desc, 0);
313 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
315 if (dma_mapping_error(jrdev, src_dma)) {
316 dev_err(jrdev, "unable to map key input memory\n");
320 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
322 if (dma_mapping_error(jrdev, dst_dma)) {
323 dev_err(jrdev, "unable to map key output memory\n");
/* undo the src mapping before bailing out */
324 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
329 /* Job descriptor to perform unkeyed hash on key_in */
330 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
331 OP_ALG_AS_INITFINAL);
332 append_seq_in_ptr(desc, src_dma, *keylen, 0);
333 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
334 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
335 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
/* store the class-2 context (the digest) to dst_dma */
336 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
337 LDST_SRCDST_BYTE_CONTEXT);
340 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
341 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
342 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
343 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
347 init_completion(&result.completion);
349 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
/* synchronous wait for split_key_done() to fire */
352 wait_for_completion(&result.completion);
355 print_hex_dump(KERN_ERR,
356 "digested key@"__stringify(__LINE__)": ",
357 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
361 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
362 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
/* report the reduced key length back to the caller */
364 *keylen = digestsize;
/*
 * crypto_ahash setkey handler: keys longer than the block size are first
 * digested down (hash_digest_key), then either kept inline for DKP-capable
 * hardware (era >= 6) or run through gen_split_key(); finally the shared
 * descriptors are rebuilt.
 * NOTE(review): ret declaration, kmalloc_array flags/failure check,
 * kfree(hashed_key) cleanups and the bad_free_key error path are missing
 * from this sampled paste.
 */
371 static int ahash_setkey(struct crypto_ahash *ahash,
372 const u8 *key, unsigned int keylen)
374 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
375 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
376 int digestsize = crypto_ahash_digestsize(ahash);
377 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
379 u8 *hashed_key = NULL;
382 printk(KERN_ERR "keylen %d\n", keylen);
/* over-long key: replace it with its own digest */
385 if (keylen > blocksize) {
386 hashed_key = kmalloc_array(digestsize,
388 GFP_KERNEL | GFP_DMA);
391 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
399 * If DKP is supported, use it in the shared descriptor to generate
/* era >= 6: Derived Key Protocol available, keep plain key inline */
402 if (ctrlpriv->era >= 6) {
403 ctx->adata.key_inline = true;
404 ctx->adata.keylen = keylen;
405 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
408 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
411 memcpy(ctx->key, key, keylen);
/* older hardware: derive the split key via a job-ring operation */
413 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
414 keylen, CAAM_MAX_HASH_KEY_SIZE);
420 return ahash_set_sh_desc(ahash);
423 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
428 * ahash_edesc - s/w-extended ahash descriptor
429 * @dst_dma: physical mapped address of req->result
430 * @sec4_sg_dma: physical mapped address of h/w link table
431 * @src_nents: number of segments in input scatterlist
432 * @sec4_sg_bytes: length of dma mapped sec4_sg space
433 * @hw_desc: the h/w job descriptor followed by any referenced link tables
434 * @sec4_sg: h/w link table
/* NOTE(review): struct opener, dst_dma, src_nents and sec4_sg_bytes member
 * lines are missing from this sampled paste (kernel-doc above lists them) */
438 dma_addr_t sec4_sg_dma;
441 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
/* NOTE(review): [0]-sized trailing array — modern C prefers a flexible
 * array member ([]), left unchanged here */
442 struct sec4_sg_entry sec4_sg[0];
/*
 * Undo all DMA mappings recorded in the edesc and in state->buf_dma:
 * source scatterlist, result buffer (dst_len bytes), link table, and the
 * staging buffer if it was mapped.
 * NOTE(review): the buf_dma reset after unmap and closing braces are
 * missing from this sampled paste.
 */
445 static inline void ahash_unmap(struct device *dev,
446 struct ahash_edesc *edesc,
447 struct ahash_request *req, int dst_len)
449 struct caam_hash_state *state = ahash_request_ctx(req);
451 if (edesc->src_nents)
452 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
454 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
456 if (edesc->sec4_sg_bytes)
457 dma_unmap_single(dev, edesc->sec4_sg_dma,
458 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
460 if (state->buf_dma) {
461 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
/*
 * Like ahash_unmap(), but additionally unmaps the running hardware context
 * (state->ctx_dma, ctx->ctx_len bytes) with the caller-supplied direction.
 * NOTE(review): ctx_dma reset and closing braces are outside this view.
 */
467 static inline void ahash_unmap_ctx(struct device *dev,
468 struct ahash_edesc *edesc,
469 struct ahash_request *req, int dst_len, u32 flag)
471 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
472 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
473 struct caam_hash_state *state = ahash_request_ctx(req);
475 if (state->ctx_dma) {
476 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
/* then release everything the base helper knows about */
479 ahash_unmap(dev, edesc, req, dst_len);
/*
 * Job-ring completion callback for one-shot digest jobs: decode status,
 * unmap, free the edesc (in the full file) and complete the request.
 * NOTE(review): the #ifdef DEBUG guards, kfree(edesc) and closing brace
 * are missing from this sampled paste.
 */
482 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
485 struct ahash_request *req = context;
486 struct ahash_edesc *edesc;
487 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
488 int digestsize = crypto_ahash_digestsize(ahash);
490 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
491 struct caam_hash_state *state = ahash_request_ctx(req);
493 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
/* recover the edesc from the embedded h/w descriptor pointer */
496 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
498 caam_jr_strstatus(jrdev, err);
500 ahash_unmap(jrdev, edesc, req, digestsize);
504 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
505 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
508 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
509 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
/* hand the (possibly decoded) status back to the crypto API */
513 req->base.complete(&req->base, err);
/*
 * Completion callback for update jobs that read AND write the running
 * context — hence the DMA_BIDIRECTIONAL unmap.
 * NOTE(review): DEBUG guards, switch_buf()/kfree(edesc) calls and closing
 * brace are missing from this sampled paste.
 */
516 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
519 struct ahash_request *req = context;
520 struct ahash_edesc *edesc;
521 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
522 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
523 struct caam_hash_state *state = ahash_request_ctx(req);
525 int digestsize = crypto_ahash_digestsize(ahash);
527 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
530 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
532 caam_jr_strstatus(jrdev, err);
534 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
539 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
540 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
543 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
544 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
548 req->base.complete(&req->base, err);
/*
 * Completion callback for final/finup jobs: context was only read by the
 * device (DMA_TO_DEVICE) and the digest went to req->result.
 * NOTE(review): DEBUG guards, kfree(edesc) and closing brace missing from
 * this sampled paste.
 */
551 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
554 struct ahash_request *req = context;
555 struct ahash_edesc *edesc;
556 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
557 int digestsize = crypto_ahash_digestsize(ahash);
559 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
560 struct caam_hash_state *state = ahash_request_ctx(req);
562 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
565 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
567 caam_jr_strstatus(jrdev, err);
569 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
573 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
574 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
577 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
578 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
582 req->base.complete(&req->base, err);
/*
 * Completion callback for first/no-ctx update jobs: the device wrote a new
 * running context (DMA_FROM_DEVICE unmap), no digest was produced.
 * NOTE(review): DEBUG guards, switch_buf()/kfree(edesc) and closing brace
 * missing from this sampled paste.
 */
585 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
588 struct ahash_request *req = context;
589 struct ahash_edesc *edesc;
590 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
591 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
592 struct caam_hash_state *state = ahash_request_ctx(req);
594 int digestsize = crypto_ahash_digestsize(ahash);
596 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
599 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
601 caam_jr_strstatus(jrdev, err);
603 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
608 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
609 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
612 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
613 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
617 req->base.complete(&req->base, err);
621 * Allocate an enhanced descriptor, which contains the hardware descriptor
622 * and space for hardware scatter table containing sg_num entries.
/*
 * Single kzalloc covers the edesc header plus sg_num link-table entries
 * (flexible tail). The job descriptor header is initialized to execute
 * the given shared descriptor (HDR_SHARE_DEFER | HDR_REVERSE).
 * NOTE(review): gfp_t flags parameter, NULL-check return and final
 * "return edesc" are missing from this sampled paste.
 */
624 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
625 int sg_num, u32 *sh_desc,
626 dma_addr_t sh_desc_dma,
629 struct ahash_edesc *edesc;
630 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
/* GFP_DMA: descriptor memory must be reachable by the CAAM engine */
632 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
634 dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
638 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
639 HDR_SHARE_DEFER | HDR_REVERSE);
/*
 * Append the request's source data to the job descriptor: either directly
 * (single mapped segment, no prefix entries) or via a DMA-mapped sec4 link
 * table when multiple segments / a leading buffer entry are involved.
 * first_bytes accounts for data already placed in the first_sg entries.
 * NOTE(review): mapping-error return value, the LDST_SGF flag selection on
 * the final append and closing brace are missing from this sampled paste.
 */
644 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
645 struct ahash_edesc *edesc,
646 struct ahash_request *req, int nents,
647 unsigned int first_sg,
648 unsigned int first_bytes, size_t to_hash)
653 if (nents > 1 || first_sg) {
654 struct sec4_sg_entry *sg = edesc->sec4_sg;
655 unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
/* src entries go after the first_sg pre-filled entries */
657 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
659 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
660 if (dma_mapping_error(ctx->jrdev, src_dma)) {
661 dev_err(ctx->jrdev, "unable to map S/G table\n");
665 edesc->sec4_sg_bytes = sgsize;
666 edesc->sec4_sg_dma = src_dma;
/* single contiguous segment: point the descriptor straight at it */
669 src_dma = sg_dma_address(req->src);
673 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
679 /* submit update job descriptor */
/*
 * .update handler once a hardware context exists: hash all full blocks
 * (staged buffer + new data), keep the sub-block remainder in the
 * alternate staging buffer, and run the job with the context mapped
 * bidirectionally (read old, write new).
 * NOTE(review): this sampled paste drops the u32 *desc / ret declarations,
 * the "if (to_hash)" structure, several error-path labels and closing
 * braces — line numbers at left are paste artifacts.
 */
680 static int ahash_update_ctx(struct ahash_request *req)
682 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
683 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
684 struct caam_hash_state *state = ahash_request_ctx(req);
685 struct device *jrdev = ctx->jrdev;
/* may be called from atomic context: pick allocation flags accordingly */
686 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
687 GFP_KERNEL : GFP_ATOMIC;
688 u8 *buf = current_buf(state);
689 int *buflen = current_buflen(state);
690 u8 *next_buf = alt_buf(state);
691 int *next_buflen = alt_buflen(state), last_buflen;
692 int in_len = *buflen + req->nbytes, to_hash;
694 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
695 struct ahash_edesc *edesc;
698 last_buflen = *next_buflen;
/* remainder that does not fill a whole block stays buffered */
699 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
700 to_hash = in_len - *next_buflen;
703 src_nents = sg_nents_for_len(req->src,
704 req->nbytes - (*next_buflen));
706 dev_err(jrdev, "Invalid number of src SG.\n");
711 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
714 dev_err(jrdev, "unable to DMA map source\n");
/* link table: ctx entry + optional buffered-data entry + src entries */
721 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
722 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
723 sizeof(struct sec4_sg_entry);
726 * allocate space for base edesc and hw desc commands,
729 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
731 ctx->sh_desc_update_dma, flags);
733 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
737 edesc->src_nents = src_nents;
738 edesc->sec4_sg_bytes = sec4_sg_bytes;
740 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
741 edesc->sec4_sg, DMA_BIDIRECTIONAL);
745 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
750 sg_to_sec4_sg_last(req->src, mapped_nents,
751 edesc->sec4_sg + sec4_sg_src_index,
/* stash the tail that will not be hashed this round */
754 scatterwalk_map_and_copy(next_buf, req->src,
758 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
762 desc = edesc->hw_desc;
764 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
767 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
768 dev_err(jrdev, "unable to map S/G table\n");
773 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
776 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
779 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
780 DUMP_PREFIX_ADDRESS, 16, 4, desc,
781 desc_bytes(desc), 1);
784 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
/* not enough data for even one block: just accumulate into the buffer */
789 } else if (*next_buflen) {
790 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
792 *buflen = *next_buflen;
793 *next_buflen = last_buflen;
796 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
797 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
798 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
799 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
/* error unwind: release ctx + edesc mappings */
805 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
/*
 * .final handler with a live context: finalize from the running context
 * plus any buffered tail, writing the digest to req->result.
 * NOTE(review): u32 *desc / ret declarations, error labels, kfree and
 * closing braces are missing from this sampled paste.
 */
810 static int ahash_final_ctx(struct ahash_request *req)
812 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
813 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
814 struct caam_hash_state *state = ahash_request_ctx(req);
815 struct device *jrdev = ctx->jrdev;
816 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
817 GFP_KERNEL : GFP_ATOMIC;
818 int buflen = *current_buflen(state);
820 int sec4_sg_bytes, sec4_sg_src_index;
821 int digestsize = crypto_ahash_digestsize(ahash);
822 struct ahash_edesc *edesc;
/* link table holds the ctx entry plus the buffer entry if non-empty */
825 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
826 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
828 /* allocate space for base edesc and hw desc commands, link tables */
829 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
830 ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
835 desc = edesc->hw_desc;
837 edesc->sec4_sg_bytes = sec4_sg_bytes;
/* final job only reads the context: DMA_TO_DEVICE */
839 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
840 edesc->sec4_sg, DMA_TO_DEVICE);
844 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
848 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
850 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
851 sec4_sg_bytes, DMA_TO_DEVICE);
852 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
853 dev_err(jrdev, "unable to map S/G table\n");
858 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
861 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
863 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
864 dev_err(jrdev, "unable to map dst\n");
870 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
871 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
874 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
/* error unwind */
880 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
/*
 * .finup handler with a live context: one job that consumes the running
 * context, any buffered tail AND the remaining request data, producing
 * the final digest.
 * NOTE(review): u32 *desc / ret declarations, error labels and closing
 * braces are missing from this sampled paste.
 */
885 static int ahash_finup_ctx(struct ahash_request *req)
887 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
888 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
889 struct caam_hash_state *state = ahash_request_ctx(req);
890 struct device *jrdev = ctx->jrdev;
891 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
892 GFP_KERNEL : GFP_ATOMIC;
893 int buflen = *current_buflen(state);
895 int sec4_sg_src_index;
896 int src_nents, mapped_nents;
897 int digestsize = crypto_ahash_digestsize(ahash);
898 struct ahash_edesc *edesc;
901 src_nents = sg_nents_for_len(req->src, req->nbytes);
903 dev_err(jrdev, "Invalid number of src SG.\n");
908 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
911 dev_err(jrdev, "unable to DMA map source\n");
/* ctx entry + optional buffer entry precede the src entries */
918 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
920 /* allocate space for base edesc and hw desc commands, link tables */
921 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
922 ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
925 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
929 desc = edesc->hw_desc;
931 edesc->src_nents = src_nents;
933 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
934 edesc->sec4_sg, DMA_TO_DEVICE);
938 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
/* total sequence-in length = ctx + buffered tail + request bytes */
942 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
943 sec4_sg_src_index, ctx->ctx_len + buflen,
948 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
950 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
951 dev_err(jrdev, "unable to map dst\n");
957 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
958 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
961 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
/* error unwind */
967 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
/*
 * One-shot .digest handler: hash the whole request in a single job using
 * the INITFINAL shared descriptor, no persistent context.
 * NOTE(review): u32 *desc / ret declarations, buf_dma reset, error paths
 * and closing braces are missing from this sampled paste.
 */
972 static int ahash_digest(struct ahash_request *req)
974 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
975 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
976 struct caam_hash_state *state = ahash_request_ctx(req);
977 struct device *jrdev = ctx->jrdev;
978 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
979 GFP_KERNEL : GFP_ATOMIC;
981 int digestsize = crypto_ahash_digestsize(ahash);
982 int src_nents, mapped_nents;
983 struct ahash_edesc *edesc;
988 src_nents = sg_nents_for_len(req->src, req->nbytes);
990 dev_err(jrdev, "Invalid number of src SG.\n");
995 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
998 dev_err(jrdev, "unable to map source for DMA\n");
1005 /* allocate space for base edesc and hw desc commands, link tables */
/* a single mapped segment needs no link table -> sg_num = 0 */
1006 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
1007 ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1010 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1014 edesc->src_nents = src_nents;
1016 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1019 ahash_unmap(jrdev, edesc, req, digestsize);
1024 desc = edesc->hw_desc;
1026 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1028 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1029 dev_err(jrdev, "unable to map dst\n");
1030 ahash_unmap(jrdev, edesc, req, digestsize);
1036 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1037 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1040 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
/* enqueue failed: undo mappings */
1044 ahash_unmap(jrdev, edesc, req, digestsize);
1051 /* submit ahash final if it the first job descriptor */
/*
 * .final handler when no hardware context exists yet: the entire message
 * is the buffered data, so run a one-shot digest job over the buffer.
 * NOTE(review): u32 *desc / ret declarations, the "if (buflen)" guard
 * around the buffer mapping, error labels and closing braces are missing
 * from this sampled paste.
 */
1052 static int ahash_final_no_ctx(struct ahash_request *req)
1054 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1055 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1056 struct caam_hash_state *state = ahash_request_ctx(req);
1057 struct device *jrdev = ctx->jrdev;
1058 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1059 GFP_KERNEL : GFP_ATOMIC;
1060 u8 *buf = current_buf(state);
1061 int buflen = *current_buflen(state);
1063 int digestsize = crypto_ahash_digestsize(ahash);
1064 struct ahash_edesc *edesc;
1067 /* allocate space for base edesc and hw desc commands, link tables */
/* no link table needed: input is one contiguous buffer */
1068 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
1069 ctx->sh_desc_digest_dma, flags);
1073 desc = edesc->hw_desc;
1076 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1078 if (dma_mapping_error(jrdev, state->buf_dma)) {
1079 dev_err(jrdev, "unable to map src\n");
1083 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1088 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1089 dev_err(jrdev, "unable to map dst\n");
1094 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1095 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1098 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1102 ahash_unmap(jrdev, edesc, req, digestsize);
/* error unwind */
1108 ahash_unmap(jrdev, edesc, req, digestsize);
1114 /* submit ahash update if it the first job descriptor after update */
/*
 * .update handler before any context exists: runs an INIT job over the
 * buffered data plus new full blocks, producing the first hardware
 * context; on success the state's handlers are promoted to the *_ctx
 * variants.
 * NOTE(review): u32 *desc / ret declarations, the "if (to_hash)"
 * structure, error labels and closing braces are missing from this
 * sampled paste.
 */
1115 static int ahash_update_no_ctx(struct ahash_request *req)
1117 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1118 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1119 struct caam_hash_state *state = ahash_request_ctx(req);
1120 struct device *jrdev = ctx->jrdev;
1121 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1122 GFP_KERNEL : GFP_ATOMIC;
1123 u8 *buf = current_buf(state);
1124 int *buflen = current_buflen(state);
1125 u8 *next_buf = alt_buf(state);
1126 int *next_buflen = alt_buflen(state);
1127 int in_len = *buflen + req->nbytes, to_hash;
1128 int sec4_sg_bytes, src_nents, mapped_nents;
1129 struct ahash_edesc *edesc;
/* sub-block remainder stays buffered for the next call */
1133 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1134 to_hash = in_len - *next_buflen;
1137 src_nents = sg_nents_for_len(req->src,
1138 req->nbytes - *next_buflen);
1139 if (src_nents < 0) {
1140 dev_err(jrdev, "Invalid number of src SG.\n");
1145 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1147 if (!mapped_nents) {
1148 dev_err(jrdev, "unable to DMA map source\n");
/* link table: one buffer entry + src entries (no ctx entry yet) */
1155 sec4_sg_bytes = (1 + mapped_nents) *
1156 sizeof(struct sec4_sg_entry);
1159 * allocate space for base edesc and hw desc commands,
1162 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
1163 ctx->sh_desc_update_first,
1164 ctx->sh_desc_update_first_dma,
1167 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1171 edesc->src_nents = src_nents;
1172 edesc->sec4_sg_bytes = sec4_sg_bytes;
1174 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1178 sg_to_sec4_sg_last(req->src, mapped_nents,
1179 edesc->sec4_sg + 1, 0);
/* stash the unhashed tail */
1182 scatterwalk_map_and_copy(next_buf, req->src,
1187 desc = edesc->hw_desc;
1189 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1192 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1193 dev_err(jrdev, "unable to map S/G table\n");
1198 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
/* device writes the freshly-created running context */
1200 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1205 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1206 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1207 desc_bytes(desc), 1);
1210 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
/* context now exists: promote handlers to the *_ctx variants */
1215 state->update = ahash_update_ctx;
1216 state->finup = ahash_finup_ctx;
1217 state->final = ahash_final_ctx;
1218 } else if (*next_buflen) {
1219 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1221 *buflen = *next_buflen;
1225 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1226 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1227 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1228 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
/* error unwind: context mapped DMA_FROM_DEVICE in this path */
1234 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1239 /* submit ahash finup if it the first job descriptor after update */
/*
 * .finup handler before any context exists: single digest job over the
 * buffered data plus all remaining request data.
 * NOTE(review): u32 *desc / ret declarations, error labels and closing
 * braces are missing from this sampled paste.
 */
1240 static int ahash_finup_no_ctx(struct ahash_request *req)
1242 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1243 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1244 struct caam_hash_state *state = ahash_request_ctx(req);
1245 struct device *jrdev = ctx->jrdev;
1246 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1247 GFP_KERNEL : GFP_ATOMIC;
1248 int buflen = *current_buflen(state);
1250 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1251 int digestsize = crypto_ahash_digestsize(ahash);
1252 struct ahash_edesc *edesc;
1255 src_nents = sg_nents_for_len(req->src, req->nbytes);
1256 if (src_nents < 0) {
1257 dev_err(jrdev, "Invalid number of src SG.\n");
1262 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1264 if (!mapped_nents) {
1265 dev_err(jrdev, "unable to DMA map source\n");
1272 sec4_sg_src_index = 2;
1273 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1274 sizeof(struct sec4_sg_entry);
1276 /* allocate space for base edesc and hw desc commands, link tables */
1277 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1278 ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1281 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1285 desc = edesc->hw_desc;
1287 edesc->src_nents = src_nents;
1288 edesc->sec4_sg_bytes = sec4_sg_bytes;
/* buffered tail goes first in the link table... */
1290 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
/* ...then the request data, offset by one entry (first_sg = 1) */
1294 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1297 dev_err(jrdev, "unable to map S/G table\n");
1301 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1303 if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1304 dev_err(jrdev, "unable to map dst\n");
1309 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1310 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1313 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1317 ahash_unmap(jrdev, edesc, req, digestsize);
/* error unwind */
1323 ahash_unmap(jrdev, edesc, req, digestsize);
1329 /* submit first update job descriptor after init */
/*
 * First .update after ahash_init(): nothing buffered yet. Hash the full
 * blocks of the request with an INIT job (creating the hardware context)
 * and buffer the remainder; promote handlers to *_ctx on success, or to
 * the *_no_ctx variants if everything fit in the buffer.
 * NOTE(review): u32 *desc / int ret / to_hash declarations, the
 * "if (to_hash)" structure, error labels and closing braces are missing
 * from this sampled paste.
 */
1330 static int ahash_update_first(struct ahash_request *req)
1332 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1333 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1334 struct caam_hash_state *state = ahash_request_ctx(req);
1335 struct device *jrdev = ctx->jrdev;
1336 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1337 GFP_KERNEL : GFP_ATOMIC;
1338 u8 *next_buf = alt_buf(state);
1339 int *next_buflen = alt_buflen(state);
1342 int src_nents, mapped_nents;
1343 struct ahash_edesc *edesc;
/* remainder below one block is buffered, the rest is hashed */
1346 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1348 to_hash = req->nbytes - *next_buflen;
1351 src_nents = sg_nents_for_len(req->src,
1352 req->nbytes - *next_buflen);
1353 if (src_nents < 0) {
1354 dev_err(jrdev, "Invalid number of src SG.\n");
1359 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1361 if (!mapped_nents) {
1362 dev_err(jrdev, "unable to map source for DMA\n");
1370 * allocate space for base edesc and hw desc commands,
/* single segment needs no link table */
1373 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
1375 ctx->sh_desc_update_first,
1376 ctx->sh_desc_update_first_dma,
1379 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1383 edesc->src_nents = src_nents;
1385 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1391 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1394 desc = edesc->hw_desc;
/* device writes the new running context */
1396 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1401 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1402 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1403 desc_bytes(desc), 1);
1406 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1411 state->update = ahash_update_ctx;
1412 state->finup = ahash_finup_ctx;
1413 state->final = ahash_final_ctx;
/* all data fit in the buffer: stay on the no-ctx path */
1414 } else if (*next_buflen) {
1415 state->update = ahash_update_no_ctx;
1416 state->finup = ahash_finup_no_ctx;
1417 state->final = ahash_final_no_ctx;
1418 scatterwalk_map_and_copy(next_buf, req->src, 0,
1423 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1424 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
/* error unwind */
1430 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
/* finup with no buffered data from a prior update is a one-shot digest */
1435 static int ahash_finup_first(struct ahash_request *req)
1437 return ahash_digest(req);
/*
 * .init: reset the per-request state and point the state callbacks at
 * the "first" handlers; no hardware work is submitted here.
 */
1440 static int ahash_init(struct ahash_request *req)
1442 struct caam_hash_state *state = ahash_request_ctx(req);
1444 state->update = ahash_update_first;
1445 state->finup = ahash_finup_first;
1446 state->final = ahash_final_no_ctx;
/* start with ping-pong buffer 0 active and both buffers empty */
1449 state->current_buf = 0;
1451 state->buflen_0 = 0;
1452 state->buflen_1 = 0;
/* .update: dispatch to the state-dependent update handler */
1457 static int ahash_update(struct ahash_request *req)
1459 struct caam_hash_state *state = ahash_request_ctx(req);
1461 return state->update(req);
/* .finup: dispatch to the state-dependent finup handler */
1464 static int ahash_finup(struct ahash_request *req)
1466 struct caam_hash_state *state = ahash_request_ctx(req);
1468 return state->finup(req);
/* .final: dispatch to the state-dependent final handler */
1471 static int ahash_final(struct ahash_request *req)
1473 struct caam_hash_state *state = ahash_request_ctx(req);
1475 return state->final(req);
/*
 * .export: snapshot the running CAAM context, the currently active
 * partial-block buffer and the state callbacks into @out (a struct
 * caam_export_state) so hashing can later resume via ahash_import().
 */
1478 static int ahash_export(struct ahash_request *req, void *out)
1480 struct caam_hash_state *state = ahash_request_ctx(req);
1481 struct caam_export_state *export = out;
/* pick whichever of the two ping-pong buffers is current */
1485 if (state->current_buf) {
1487 len = state->buflen_1;
1490 len = state->buflen_0;
1493 memcpy(export->buf, buf, len);
1494 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1495 export->buflen = len;
1496 export->update = state->update;
1497 export->final = state->final;
1498 export->finup = state->finup;
/*
 * .import: rebuild the request state from a caam_export_state
 * snapshot. The imported buffer always lands in buf_0/buflen_0
 * (state is zeroed first, so current_buf becomes 0).
 */
1503 static int ahash_import(struct ahash_request *req, const void *in)
1505 struct caam_hash_state *state = ahash_request_ctx(req);
1506 const struct caam_export_state *export = in;
1508 memset(state, 0, sizeof(*state));
1509 memcpy(state->buf_0, export->buf, export->buflen);
1510 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1511 state->buflen_0 = export->buflen;
1512 state->update = export->update;
1513 state->final = export->final;
1514 state->finup = export->finup;
/*
 * Template describing one hash algorithm; each template is used to
 * register both a plain ("name") and a keyed ("hmac_name") variant.
 */
1519 struct caam_hash_template {
1520 char name[CRYPTO_MAX_ALG_NAME];
1521 char driver_name[CRYPTO_MAX_ALG_NAME];
1522 char hmac_name[CRYPTO_MAX_ALG_NAME];
1523 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1524 unsigned int blocksize;
1525 struct ahash_alg template_ahash;
1529 /* ahash descriptors */
/*
 * Table of hash algorithms this driver can offer. Each entry is
 * registered twice (plain and hmac) by caam_algapi_hash_init(),
 * subject to the MD block capabilities probed there.
 */
1530 static struct caam_hash_template driver_hash[] = {
/* SHA-1 */
1533 .driver_name = "sha1-caam",
1534 .hmac_name = "hmac(sha1)",
1535 .hmac_driver_name = "hmac-sha1-caam",
1536 .blocksize = SHA1_BLOCK_SIZE,
1539 .update = ahash_update,
1540 .final = ahash_final,
1541 .finup = ahash_finup,
1542 .digest = ahash_digest,
1543 .export = ahash_export,
1544 .import = ahash_import,
1545 .setkey = ahash_setkey,
1547 .digestsize = SHA1_DIGEST_SIZE,
1548 .statesize = sizeof(struct caam_export_state),
1551 .alg_type = OP_ALG_ALGSEL_SHA1,
/* SHA-224 */
1554 .driver_name = "sha224-caam",
1555 .hmac_name = "hmac(sha224)",
1556 .hmac_driver_name = "hmac-sha224-caam",
1557 .blocksize = SHA224_BLOCK_SIZE,
1560 .update = ahash_update,
1561 .final = ahash_final,
1562 .finup = ahash_finup,
1563 .digest = ahash_digest,
1564 .export = ahash_export,
1565 .import = ahash_import,
1566 .setkey = ahash_setkey,
1568 .digestsize = SHA224_DIGEST_SIZE,
1569 .statesize = sizeof(struct caam_export_state),
1572 .alg_type = OP_ALG_ALGSEL_SHA224,
/* SHA-256 */
1575 .driver_name = "sha256-caam",
1576 .hmac_name = "hmac(sha256)",
1577 .hmac_driver_name = "hmac-sha256-caam",
1578 .blocksize = SHA256_BLOCK_SIZE,
1581 .update = ahash_update,
1582 .final = ahash_final,
1583 .finup = ahash_finup,
1584 .digest = ahash_digest,
1585 .export = ahash_export,
1586 .import = ahash_import,
1587 .setkey = ahash_setkey,
1589 .digestsize = SHA256_DIGEST_SIZE,
1590 .statesize = sizeof(struct caam_export_state),
1593 .alg_type = OP_ALG_ALGSEL_SHA256,
/* SHA-384 */
1596 .driver_name = "sha384-caam",
1597 .hmac_name = "hmac(sha384)",
1598 .hmac_driver_name = "hmac-sha384-caam",
1599 .blocksize = SHA384_BLOCK_SIZE,
1602 .update = ahash_update,
1603 .final = ahash_final,
1604 .finup = ahash_finup,
1605 .digest = ahash_digest,
1606 .export = ahash_export,
1607 .import = ahash_import,
1608 .setkey = ahash_setkey,
1610 .digestsize = SHA384_DIGEST_SIZE,
1611 .statesize = sizeof(struct caam_export_state),
1614 .alg_type = OP_ALG_ALGSEL_SHA384,
/* SHA-512 */
1617 .driver_name = "sha512-caam",
1618 .hmac_name = "hmac(sha512)",
1619 .hmac_driver_name = "hmac-sha512-caam",
1620 .blocksize = SHA512_BLOCK_SIZE,
1623 .update = ahash_update,
1624 .final = ahash_final,
1625 .finup = ahash_finup,
1626 .digest = ahash_digest,
1627 .export = ahash_export,
1628 .import = ahash_import,
1629 .setkey = ahash_setkey,
1631 .digestsize = SHA512_DIGEST_SIZE,
1632 .statesize = sizeof(struct caam_export_state),
1635 .alg_type = OP_ALG_ALGSEL_SHA512,
/* MD5 */
1638 .driver_name = "md5-caam",
1639 .hmac_name = "hmac(md5)",
1640 .hmac_driver_name = "hmac-md5-caam",
1641 .blocksize = MD5_BLOCK_WORDS * 4,
1644 .update = ahash_update,
1645 .final = ahash_final,
1646 .finup = ahash_finup,
1647 .digest = ahash_digest,
1648 .export = ahash_export,
1649 .import = ahash_import,
1650 .setkey = ahash_setkey,
1652 .digestsize = MD5_DIGEST_SIZE,
1653 .statesize = sizeof(struct caam_export_state),
1656 .alg_type = OP_ALG_ALGSEL_MD5,
/* one registered algorithm: list node (hash_list) + the crypto API alg */
1660 struct caam_hash_alg {
1661 struct list_head entry;
1663 struct ahash_alg ahash_alg;
/*
 * .cra_init: per-transform setup. Allocates a job ring, DMA-maps the
 * shared descriptors embedded in the context as a single region,
 * derives each descriptor's bus address from that mapping, records
 * the running-digest length for the selected algorithm and finally
 * builds the shared descriptors via ahash_set_sh_desc().
 */
1666 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1668 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1669 struct crypto_alg *base = tfm->__crt_alg;
1670 struct hash_alg_common *halg =
1671 container_of(base, struct hash_alg_common, base);
1672 struct ahash_alg *alg =
1673 container_of(halg, struct ahash_alg, halg);
1674 struct caam_hash_alg *caam_hash =
1675 container_of(alg, struct caam_hash_alg, ahash_alg);
1676 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1677 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1678 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1679 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1681 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1683 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1684 dma_addr_t dma_addr;
1685 struct caam_drv_private *priv;
1688 * Get a Job ring from Job Ring driver to ensure in-order
1689 * crypto request processing per tfm
1691 ctx->jrdev = caam_jr_alloc();
1692 if (IS_ERR(ctx->jrdev)) {
1693 pr_err("Job Ring Device allocation for transform failed\n");
1694 return PTR_ERR(ctx->jrdev);
1697 priv = dev_get_drvdata(ctx->jrdev->parent);
/* NOTE(review): era >= 6 maps bidirectionally — presumably the device
 * also writes the descriptor region on those parts; confirm against
 * the CAAM era documentation. */
1698 ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* one mapping covers the whole run of shared descriptors in the ctx */
1700 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1701 offsetof(struct caam_hash_ctx,
1702 sh_desc_update_dma),
1703 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1704 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1705 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
/* give the job ring back on failure */
1706 caam_jr_free(ctx->jrdev);
/* derive each descriptor's bus address from the single mapping */
1710 ctx->sh_desc_update_dma = dma_addr;
1711 ctx->sh_desc_update_first_dma = dma_addr +
1712 offsetof(struct caam_hash_ctx,
1713 sh_desc_update_first);
1714 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1716 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1719 /* copy descriptor header template value */
1720 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
/* running-digest size, indexed by the algorithm selector bits */
1722 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1723 OP_ALG_ALGSEL_SUBMASK) >>
1724 OP_ALG_ALGSEL_SHIFT];
1726 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1727 sizeof(struct caam_hash_state));
1728 return ahash_set_sh_desc(ahash);
/*
 * .cra_exit: undo caam_hash_cra_init() — unmap the shared-descriptor
 * region (same offset/attrs as the mapping) and free the job ring.
 */
1731 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1733 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1735 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1736 offsetof(struct caam_hash_ctx,
1737 sh_desc_update_dma),
1738 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1739 caam_jr_free(ctx->jrdev);
/* module exit: unregister and unlink every algorithm on hash_list */
1742 static void __exit caam_algapi_hash_exit(void)
1744 struct caam_hash_alg *t_alg, *n;
/* hash_list is only initialized if init got far enough; bail if not */
1746 if (!hash_list.next)
1749 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1750 crypto_unregister_ahash(&t_alg->ahash_alg);
1751 list_del(&t_alg->entry);
/*
 * Allocate and populate a caam_hash_alg from a template. The boolean
 * second parameter selects the keyed (hmac) names; for the unkeyed
 * variant ->setkey is cleared. Returns ERR_PTR(-ENOMEM) if the
 * allocation fails; the caller owns the returned object.
 */
1756 static struct caam_hash_alg *
1757 caam_hash_alloc(struct caam_hash_template *template,
1760 struct caam_hash_alg *t_alg;
1761 struct ahash_alg *halg;
1762 struct crypto_alg *alg;
1764 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1766 pr_err("failed to allocate t_alg\n");
1767 return ERR_PTR(-ENOMEM);
/* start from the template's ahash ops, then fix up the names below */
1770 t_alg->ahash_alg = template->template_ahash;
1771 halg = &t_alg->ahash_alg;
1772 alg = &halg->halg.base;
/* keyed variant: advertise the hmac names */
1775 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1776 template->hmac_name);
1777 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1778 template->hmac_driver_name);
/* unkeyed variant: plain names, and no setkey */
1780 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1782 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1783 template->driver_name);
1784 t_alg->ahash_alg.setkey = NULL;
1786 alg->cra_module = THIS_MODULE;
1787 alg->cra_init = caam_hash_cra_init;
1788 alg->cra_exit = caam_hash_cra_exit;
1789 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1790 alg->cra_priority = CAAM_CRA_PRIORITY;
1791 alg->cra_blocksize = template->blocksize;
1792 alg->cra_alignmask = 0;
1793 alg->cra_flags = CRYPTO_ALG_ASYNC;
1795 t_alg->alg_type = template->alg_type;
/*
 * Module init: locate the CAAM controller via the device tree, probe
 * the MD (hashing) block's presence/version, then register a keyed
 * (hmac) and an unkeyed variant of every supported driver_hash[]
 * template, tracking successes on hash_list for later teardown.
 */
1800 static int __init caam_algapi_hash_init(void)
1802 struct device_node *dev_node;
1803 struct platform_device *pdev;
1804 struct device *ctrldev;
1806 struct caam_drv_private *priv;
1807 unsigned int md_limit = SHA512_DIGEST_SIZE;
1808 u32 md_inst, md_vid;
/* the controller's compatible string differs between DT revisions */
1810 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1812 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1817 pdev = of_find_device_by_node(dev_node);
1819 of_node_put(dev_node);
1823 ctrldev = &pdev->dev;
1824 priv = dev_get_drvdata(ctrldev);
1825 of_node_put(dev_node);
1828 * If priv is NULL, it's probably because the caam driver wasn't
1829 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1835 * Register crypto algorithms the device supports. First, identify
1836 * presence and attributes of MD block.
/* the MD capability registers moved in era 10 */
1838 if (priv->era < 10) {
1839 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1840 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1841 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1842 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1844 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1846 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1847 md_inst = mdha & CHA_VER_NUM_MASK;
1851 * Skip registration of any hashing algorithms if MD block
1857 /* Limit digest size based on LP256 */
1858 if (md_vid == CHA_VER_VID_MD_LP256)
1859 md_limit = SHA256_DIGEST_SIZE;
1861 INIT_LIST_HEAD(&hash_list);
1863 /* register crypto algorithms the device supports */
1864 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1865 struct caam_hash_alg *t_alg;
1866 struct caam_hash_template *alg = driver_hash + i;
1868 /* If MD size is not supported by device, skip registration */
1869 if (alg->template_ahash.halg.digestsize > md_limit)
1872 /* register hmac version */
1873 t_alg = caam_hash_alloc(alg, true);
1874 if (IS_ERR(t_alg)) {
1875 err = PTR_ERR(t_alg);
1876 pr_warn("%s alg allocation failed\n", alg->driver_name);
1880 err = crypto_register_ahash(&t_alg->ahash_alg);
1882 pr_warn("%s alg registration failed: %d\n",
1883 t_alg->ahash_alg.halg.base.cra_driver_name,
1887 list_add_tail(&t_alg->entry, &hash_list);
1889 /* register unkeyed version */
1890 t_alg = caam_hash_alloc(alg, false);
1891 if (IS_ERR(t_alg)) {
1892 err = PTR_ERR(t_alg);
1893 pr_warn("%s alg allocation failed\n", alg->driver_name);
1897 err = crypto_register_ahash(&t_alg->ahash_alg);
1899 pr_warn("%s alg registration failed: %d\n",
1900 t_alg->ahash_alg.halg.base.cra_driver_name,
1904 list_add_tail(&t_alg->entry, &hash_list);
1910 module_init(caam_algapi_hash_init);
1911 module_exit(caam_algapi_hash_exit);
1913 MODULE_LICENSE("GPL");
1914 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1915 MODULE_AUTHOR("Freescale Semiconductor - NMG");