/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
struct safexcel_ahash_req {
	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
struct safexcel_ahash_export_state {
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
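/*
 * Build the two-instruction token in the command descriptor: hash
 * input_length bytes of packet data, then insert the result_length
 * byte digest into the result.
 */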
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
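/*
 * Fill in the control data of the first command descriptor: algorithm,
 * digest type and state size, plus either the saved digest (for
 * continued requests) or the ipad/opad precomputes (for HMAC) copied
 * into the context record.
 */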
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= ctx->digest;

	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
			 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

		cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;

		cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;

		cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
		/*
		 * Copy the input digest if needed, and set up the context
		 * fields. Do this now as we need it to set up the first
		 * command descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
		memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
		       ctx->opad, digestsize);
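/*
 * Handle a completed hash request on the result ring: check the result
 * descriptor for errors, unmap the source scatterlist, save the state
 * returned by the engine and carry over any cached tail bytes for the
 * next part of the request.
 */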
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len, result_sz = sreq->state_sz;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

		result_sz = crypto_ahash_digestsize(ahash);
	memcpy(sreq->state, areq->result, result_sz);

	dma_unmap_sg(priv->dev, areq->src,
		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

	safexcel_free_context(priv, async, sreq->state_sz);

	cache_len = sreq->len - sreq->processed;
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;
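/*
 * Build and queue the descriptors for a hash request: one command
 * descriptor for previously cached data (if any), one per source
 * scatterlist entry, and a result descriptor receiving the digest.
 * Data that does not fill a whole block is cached for the next call.
 */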
static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
			       struct safexcel_request *request, int *commands,
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued < crypto_ahash_blocksize(ahash))

	cache_len = queued - areq->nbytes;

	/*
	 * If this is not the last request and the queued data does not fit
	 * into full blocks, cache it for the next send() call.
	 */
	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
	if (!req->last_req && extra) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache_next, extra, areq->nbytes - extra);
	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
		if (!ctx->base.cache) {

		memcpy(ctx->base.cache, req->cache, cache_len);
		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
						     cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {

		ctx->base.cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);

	/* Now handle the current ahash request buffer(s) */
	nents = dma_map_sg(priv->dev, areq->src,
			   sg_nents_for_len(areq->src, areq->nbytes),

	for_each_sg(areq->src, sg, nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
			ret = PTR_ERR(cdesc);

	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	safexcel_hash_token(first_cdesc, len, req->state_sz);

	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
					      req->state_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
		ret = PTR_ERR(rdesc);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;
	ctx->base.handle_result = safexcel_handle_result;

	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	if (ctx->base.cache_dma) {
		dma_unmap_single(priv->dev, ctx->base.cache_dma,
				 ctx->base.cache_sz, DMA_TO_DEVICE);
		ctx->base.cache_sz = 0;

		kfree(ctx->base.cache);
		ctx->base.cache = NULL;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
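/*
 * Check whether the context record in device memory still matches the
 * state of this request; if not, the context must be invalidated
 * before it can be reused.
 */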
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
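/*
 * Completion handler for a context invalidation request: when the
 * transform is being torn down the context record is freed, otherwise
 * the original request is re-queued on a freshly selected ring.
 */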
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,

		*should_complete = true;

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;
	ctx->base.needs_inv = false;
	ctx->base.send = safexcel_ahash_send;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;
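/* Queue an invalidation command for this context instead of hash data. */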
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	ctx->base.handle_result = safexcel_handle_inv_result;
	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
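/*
 * Build a dummy ahash request on the stack to invalidate the context
 * record when the transform is freed, and wait for it to complete.
 */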
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct ahash_request req;
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(&req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req.base.tfm);
	ctx->base.exit_inv = true;
	ctx->base.send = safexcel_ahash_send_inv;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

		dev_warn(priv->dev, "hash: completion error (%d)\n",
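/*
 * Buffer request data while the total amount queued, including what is
 * already cached, still fits within a single hash block.
 */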
static int safexcel_ahash_cache(struct ahash_request *areq)
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	cache_len = req->len - areq->nbytes - req->processed;
	queued = req->len - req->processed;

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,

	/* We couldn't cache all the data */
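/*
 * Common enqueue path: select the send handler (normal or
 * invalidation), allocate a context record on first use, then queue
 * the request on its ring.
 */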
static int safexcel_ahash_enqueue(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;

	ctx->base.send = safexcel_ahash_send;

	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv)
			ctx->base.send = safexcel_ahash_send_inv;

		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);
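/*
 * ahash ->update(): account for the new data and cache whatever does
 * not yet make up a full block; HMAC updates are deferred entirely to
 * the final() call.
 */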
static int safexcel_ahash_update(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)

		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);
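/*
 * ahash ->final(): return the well-known hash of the empty message
 * directly for zero-length requests, otherwise queue the finishing
 * request to the engine.
 */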
static int safexcel_ahash_final(struct ahash_request *areq)
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,

	return safexcel_ahash_enqueue(areq);
static int safexcel_ahash_finup(struct ahash_request *areq)
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
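/*
 * export/import: save and restore the software side of a request
 * (lengths, intermediate state and cached data) so that a partially
 * hashed message can be resumed later.
 */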
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	memcpy(export->state, req->state, req->state_sz);
	memset(export->cache, 0, crypto_ahash_blocksize(ahash));
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;

	ret = crypto_ahash_init(areq);

	req->len = export->len;
	req->processed = export->processed;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
static int safexcel_sha1_init(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

static int safexcel_sha1_digest(struct ahash_request *areq)
	int ret = safexcel_sha1_init(areq);

	return safexcel_ahash_finup(areq);
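/*
 * Free the per-transform context record, invalidating it in the engine
 * first if it was ever pushed to hardware.
 */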
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;

	/* context not allocated, skip invalidation */

	ret = safexcel_ahash_exit_inv(tfm);
		dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
				.cra_driver_name = "safexcel-sha1",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	safexcel_sha1_init(areq);
	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
	int ret = safexcel_hmac_sha1_init(areq);

	return safexcel_ahash_finup(areq);
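/*
 * Minimal completion bookkeeping for the synchronous digests used
 * while computing the HMAC ipad/opad precomputes at setkey() time.
 */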
struct safexcel_ahash_result {
	struct completion completion;

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)

	result->error = error;
	complete(&result->completion);
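/*
 * Compute the HMAC inner and outer pads: hash the key down first if it
 * is longer than a block, zero-pad it to the block size, then XOR with
 * the ipad/opad constants.
 */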
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
	struct safexcel_ahash_result result;
	struct scatterlist sg;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);

		keydup = kmemdup(key, keylen, GFP_KERNEL);

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);

		memzero_explicit(keydup, keylen);

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
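/*
 * Hash one padded block (ipad or opad) and export the partial state;
 * this becomes the precomputed digest loaded into the context record.
 */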
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);

	req = ahash_request_ctx(areq);

	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS)

	wait_for_completion_interruptible(&result.completion);

	return crypto_ahash_export(areq, state);
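/*
 * Derive the HMAC precomputes for a key: build ipad/opad and export
 * the partial hash state of each single-block digest into
 * istate/ostate.
 */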
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

	ahash_request_free(areq);

	crypto_free_ahash(tfm);
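/*
 * hmac(sha1) ->setkey(): recompute the ipad/opad hash states for the
 * new key and flag the context for invalidation if they changed.
 */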
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_ahash_export_state istate, ostate;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
			ctx->base.needs_inv = true;

	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
static int safexcel_sha256_init(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

static int safexcel_sha256_digest(struct ahash_request *areq)
	int ret = safexcel_sha256_init(areq);

	return safexcel_ahash_finup(areq);
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
static int safexcel_sha224_init(struct ahash_request *areq)
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

static int safexcel_sha224_digest(struct ahash_request *areq)
	int ret = safexcel_sha224_init(areq);

	return safexcel_ahash_finup(areq);
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,