/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
        bool needs_inv;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

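/*
 * Build the token for the command descriptor: hash 'input_length' bytes of
 * input, then insert the resulting 'result_length' byte digest into the
 * output packet.
 */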
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

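/*
 * Program the context control words for a hash operation: algorithm, digest
 * type, state size and, when resuming a precomputed hash, the saved digest
 * and block count.
 */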
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and set up the context
                 * fields. Do this now as we need it to set up the first
                 * command descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}

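/*
 * Process the result descriptor of a hash request: report errors, copy the
 * digest to the caller if this was the final operation, and keep any cached
 * bytes for the next send().
 */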
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                memcpy(areq->result, sreq->state,
                       crypto_ahash_digestsize(ahash));

        dma_unmap_sg(priv->dev, areq->src,
                     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

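/*
 * Build the command and result descriptors for a hash request: one descriptor
 * for previously cached data (if any), one per source scatterlist entry, plus
 * a single result descriptor for the state/digest.
 */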
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        /*
         * If this is not the last request and the queued data does not fit
         * into full blocks, cache it for the next send() call.
         */
        extra = queued & (crypto_ahash_blocksize(ahash) - 1);
        if (!req->last_req && extra) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache_next, extra, areq->nbytes - extra);

                queued -= extra;
                len -= extra;
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
                if (!ctx->base.cache) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                memcpy(ctx->base.cache, req->cache, cache_len);
                ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                                     cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
                        ret = -EINVAL;
                        goto free_cache;
                }

                ctx->base.cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 ctx->base.cache_dma,
                                                 cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        nents = dma_map_sg(priv->dev, areq->src,
                           sg_nents_for_len(areq->src, areq->nbytes),
                           DMA_TO_DEVICE);
        if (!nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Setup the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, req->state,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;

        *commands = n_cdesc;
        *results = 1;
        return 0;

cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;
        }
free_cache:
        kfree(ctx->base.cache);
        ctx->base.cache = NULL;

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

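/*
 * Compare the hardware context record with the current request state to
 * decide whether the context must be invalidated before it can be reused.
 */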
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}

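/*
 * Process the result of a context invalidation request: either free the
 * context record (on tfm exit) or re-queue the original request on a
 * (possibly different) ring.
 */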
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int err;

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, struct safexcel_request *request,
                               int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        if (req->needs_inv)
                ret = safexcel_ahash_send_inv(async, ring, request,
                                              commands, results);
        else
                ret = safexcel_ahash_send_req(async, ring, request,
                                              commands, results);
        return ret;
}

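/*
 * Queue an invalidation request for the tfm's context record and wait for the
 * engine to process it before the tfm goes away.
 */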
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

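/*
 * Copy the new request data into the internal cache as long as the cached
 * data plus the new bytes still fit in a single block; return -E2BIG
 * otherwise.
 */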
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        cache_len = req->len - areq->nbytes - req->processed;
        queued = req->len - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}

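/*
 * Queue the request on its ring, allocating a context record on first use and
 * flagging an invalidation when the existing record is stale, then kick the
 * ring if it is idle.
 */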
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        req->needs_inv = false;

        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}

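/*
 * Accumulate update data in the cache and only queue a request to the engine
 * once more than a block of data is pending; HMAC requests are deferred to
 * the final() call.
 */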
static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an hmac request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If we have an overall 0 length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memset(export->cache, 0, crypto_ahash_blocksize(ahash));
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        ret = safexcel_ahash_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

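/*
 * Build the HMAC ipad and opad blocks from the key, first hashing the key
 * with the underlying ahash when it is longer than a block.
 */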
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

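/*
 * Hash a single padded key block and export the intermediate state, which is
 * later used as the precomputed inner/outer HMAC digest.
 */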
static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

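/*
 * Compute the HMAC inner and outer intermediate states for the given key on
 * top of the 'alg' ahash implementation.
 */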
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_ahash_export_state istate, ostate;
        int ret, i;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

        return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};