/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool needs_inv;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};
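
/*
 * Build the two instruction tokens driving a hash operation: the first one
 * hashes 'input_length' bytes of input data, the second one makes the engine
 * insert the 'result_length' bytes of digest into the result.
 */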

static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
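
/*
 * Fill in the command descriptor control words for a hash operation:
 * algorithm, digest type and context size, then load the context record with
 * either the current intermediate state (and the number of blocks already
 * processed) or, for HMAC, the precomputed ipad/opad digests.
 */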

static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                         ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;

                cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;

                cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and set up the context
                 * fields. Do this now as we need it to set up the first
                 * command descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);

        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
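
/*
 * Process the result descriptor of a completed hash request: report any
 * descriptor error, copy the state written back by the engine into the
 * request, unmap the source scatterlist and keep whatever was cached for the
 * next update() call.
 */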

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        memcpy(areq->result, sreq->state,
               crypto_ahash_digestsize(ahash));

        dma_unmap_sg(priv->dev, areq->src,
                     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;
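
/*
 * Turn an ahash request into ring descriptors: an optional command descriptor
 * for previously cached data, one command descriptor per source scatterlist
 * entry, and a single result descriptor into which the engine writes the
 * state. Trailing data that does not fill a complete block is copied to
 * cache_next instead of being sent to the engine.
 */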

static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   struct safexcel_request *request,
                                   int *commands, int *results)
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
                cache_len = queued - areq->nbytes;

        /*
         * If this is not the last request and the queued data does not fit
         * into full blocks, cache it for the next send() call.
         */
        extra = queued & (crypto_ahash_blocksize(ahash) - 1);
        if (!req->last_req && extra) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache_next, extra, areq->nbytes - extra);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
        if (!ctx->base.cache) {

        memcpy(ctx->base.cache, req->cache, cache_len);
        ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                             cache_len, DMA_TO_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {

        ctx->base.cache_sz = cache_len;
        first_cdesc = safexcel_add_cdesc(priv, ring, 1,

        if (IS_ERR(first_cdesc)) {
                ret = PTR_ERR(first_cdesc);

        /* Now handle the current ahash request buffer(s) */
        nents = dma_map_sg(priv->dev, areq->src,
                           sg_nents_for_len(areq->src, areq->nbytes),

        for_each_sg(areq->src, sg, nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                        ret = PTR_ERR(cdesc);

        /* Set up the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, req->state,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                ret = PTR_ERR(rdesc);

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;

        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;

        kfree(ctx->base.cache);
        ctx->base.cache = NULL;

        spin_unlock_bh(&priv->ring[ring].egress_lock);
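
/*
 * Compare the state stored in the context record with the state of the
 * current request; when they differ, the context record has to be
 * invalidated before it can be reused.
 */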

static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
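
/*
 * Handle the result of a context invalidation request: free the context
 * record when the transform is going away, otherwise re-enqueue the original
 * request on a (possibly different) ring.
 */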

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,

                *should_complete = true;

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, struct safexcel_request *request,
                               int *commands, int *results)
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

                ret = safexcel_ahash_send_inv(async, ring, request,
                ret = safexcel_ahash_send_req(async, ring, request,
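
/*
 * Send an invalidation request for the context record of a transform that is
 * being torn down, and wait for the engine to complete it.
 */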

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

                dev_warn(priv->dev, "hash: completion error (%d)\n",
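
/*
 * Copy the input data into the request cache when it is too small to form a
 * full block; when the data does not all fit, the caller has to send it to
 * the engine instead.
 */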

static int safexcel_ahash_cache(struct ahash_request *areq)
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        cache_len = req->len - areq->nbytes - req->processed;
        queued = req->len - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,

        /* We couldn't cache all the data */
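
/*
 * Queue a request on one of the engine rings, allocating a context record
 * from the DMA pool on first use and flagging a per-request invalidation
 * when the existing record no longer matches the request state.
 */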

static int safexcel_ahash_enqueue(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;

        req->needs_inv = false;

        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;

                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);
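
/*
 * The update()/final()/finup() implementations below only push work to the
 * engine once more than a full block is pending; smaller amounts stay in the
 * software cache until a later call supplies enough data.
 */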

static int safexcel_ahash_update(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an HMAC request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)

                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

static int safexcel_ahash_final(struct ahash_request *areq)
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;

        /* If we have an overall zero-length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,

        return safexcel_ahash_enqueue(areq);

static int safexcel_ahash_finup(struct ahash_request *areq)
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
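
/*
 * export()/import() save and restore the software view of a request (total
 * length, processed byte count, intermediate state and cached data) so a
 * hash can be suspended and later resumed.
 */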

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memset(export->cache, 0, crypto_ahash_blocksize(ahash));
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;

        ret = crypto_ahash_init(areq);

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));

static int safexcel_sha1_init(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

static int safexcel_sha1_digest(struct ahash_request *areq)
        int ret = safexcel_sha1_init(areq);

        return safexcel_ahash_finup(areq);

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;

        /* context not allocated, skip invalidation */

        ret = safexcel_ahash_exit_inv(tfm);
                dev_warn(priv->dev, "hash: invalidation error %d\n", ret);

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                                .cra_driver_name = "safexcel-sha1",
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
        int ret = safexcel_hmac_sha1_init(areq);

        return safexcel_ahash_finup(areq);

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)

        result->error = error;
        complete(&result->completion);
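
/*
 * Compute the HMAC inner and outer pads: the key (hashed first when it is
 * longer than a block) is zero-padded to the block size, then XORed with the
 * 0x36/0x5c HMAC constants to produce ipad and opad.
 */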

static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
        struct safexcel_ahash_result result;
        struct scatterlist sg;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);

                keydup = kmemdup(key, keylen, GFP_KERNEL);

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);

                memzero_explicit(keydup, keylen);

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;

static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);

        req = ahash_request_ctx(areq);
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS)

        wait_for_completion_interruptible(&result.completion);

        return crypto_ahash_export(areq, state);
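
/*
 * Run ipad and opad through a single block of the hash algorithm named by
 * 'alg' and export the resulting intermediate states; these become the
 * precomputed ipad/opad digests loaded into the context record for HMAC
 * requests.
 */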

static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

        ahash_request_free(areq);

        crypto_free_ahash(tfm);

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_ahash_export_state istate, ostate;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);

        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,

static int safexcel_sha256_init(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

static int safexcel_sha256_digest(struct ahash_request *areq)
        int ret = safexcel_sha256_init(areq);

        return safexcel_ahash_finup(areq);

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,

static int safexcel_sha224_init(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

static int safexcel_sha224_digest(struct ahash_request *areq)
        int ret = safexcel_sha224_init(areq);

        return safexcel_ahash_finup(areq);

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,