/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha1_base.h - core logic for SHA-1 implementations
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#ifndef _CRYPTO_SHA1_BASE_H
#define _CRYPTO_SHA1_BASE_H

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/crypto.h>
#include <linux/module.h>

#include <asm/unaligned.h>

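/*
 * Signature of the block compression function supplied by each SHA-1
 * implementation: consume @blocks full SHA1_BLOCK_SIZE-byte blocks from
 * @src and fold them into the intermediate hash state in @sst.
 */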
typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);

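/*
 * Load the standard SHA-1 initial hash value (per FIPS 180) and reset
 * the running byte count.
 */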
static inline int sha1_base_init(struct shash_desc *desc)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);

        sctx->state[0] = SHA1_H0;
        sctx->state[1] = SHA1_H1;
        sctx->state[2] = SHA1_H2;
        sctx->state[3] = SHA1_H3;
        sctx->state[4] = SHA1_H4;
        sctx->count = 0;

        return 0;
}

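/*
 * Buffer input until a full block is available, then feed as many whole
 * SHA1_BLOCK_SIZE-byte blocks as possible to @block_fn in a single call;
 * any trailing partial block is kept in sctx->buffer for a later call.
 */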
static inline int sha1_base_do_update(struct shash_desc *desc,
                                      const u8 *data,
                                      unsigned int len,
                                      sha1_block_fn *block_fn)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;

        sctx->count += len;

        if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
                int blocks;

                if (partial) {
                        int p = SHA1_BLOCK_SIZE - partial;

                        memcpy(sctx->buffer + partial, data, p);
                        data += p;
                        len -= p;

                        block_fn(sctx, sctx->buffer, 1);
                }

                blocks = len / SHA1_BLOCK_SIZE;
                len %= SHA1_BLOCK_SIZE;

                if (blocks) {
                        block_fn(sctx, data, blocks);
                        data += blocks * SHA1_BLOCK_SIZE;
                }
                partial = 0;
        }
        if (len)
                memcpy(sctx->buffer + partial, data, len);

        return 0;
}

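/*
 * Apply the SHA-1 padding: a 0x80 byte, zeroes, and the message length
 * in bits as a 64-bit big-endian value in the last 8 bytes of the final
 * block. If the 0x80 byte leaves no room for the length field, an extra
 * all-padding block is processed first.
 */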
static inline int sha1_base_do_finalize(struct shash_desc *desc,
                                        sha1_block_fn *block_fn)
{
        const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
        struct sha1_state *sctx = shash_desc_ctx(desc);
        __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
        unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;

        sctx->buffer[partial++] = 0x80;
        if (partial > bit_offset) {
                memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
                partial = 0;

                block_fn(sctx, sctx->buffer, 1);
        }

        memset(sctx->buffer + partial, 0x0, bit_offset - partial);
        *bits = cpu_to_be64(sctx->count << 3);
        block_fn(sctx, sctx->buffer, 1);

        return 0;
}

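/*
 * Write out the digest as big-endian words (the output buffer may be
 * unaligned) and wipe the hash state from the descriptor.
 */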
static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
{
        struct sha1_state *sctx = shash_desc_ctx(desc);
        __be32 *digest = (__be32 *)out;
        int i;

        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
                put_unaligned_be32(sctx->state[i], digest++);

        *sctx = (struct sha1_state){};
        return 0;
}
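
/*
 * Illustrative sketch (not part of this header): how a driver wires
 * these helpers into a shash implementation. sha1_sketch_block_fn is a
 * hypothetical name, and the call to sha_transform() assumes the
 * lib/sha1.c helper declared in <linux/cryptohash.h> at this vintage;
 * an arch driver would substitute its own compression routine with the
 * sha1_block_fn signature.
 *
 *	static void sha1_sketch_block_fn(struct sha1_state *sst,
 *					 u8 const *src, int blocks)
 *	{
 *		u32 temp[SHA_WORKSPACE_WORDS];
 *
 *		while (blocks--) {
 *			sha_transform(sst->state, (const char *)src, temp);
 *			src += SHA1_BLOCK_SIZE;
 *		}
 *		memzero_explicit(temp, sizeof(temp));
 *	}
 *
 *	static int sha1_update(struct shash_desc *desc, const u8 *data,
 *			       unsigned int len)
 *	{
 *		return sha1_base_do_update(desc, data, len,
 *					   sha1_sketch_block_fn);
 *	}
 *
 *	static int sha1_final(struct shash_desc *desc, u8 *out)
 *	{
 *		sha1_base_do_finalize(desc, sha1_sketch_block_fn);
 *		return sha1_base_finish(desc, out);
 *	}
 */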

#endif /* _CRYPTO_SHA1_BASE_H */