arch/arm64/crypto/sha256-glue.c
/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

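/*
 * Core SHA-256 block transforms, implemented in assembly (ported from
 * OpenSSL): a plain scalar version and a NEON-accelerated version.  Both
 * take the running digest, a pointer to the input data and the number of
 * 64-byte blocks to process.
 */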
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
					unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

asmlinkage void sha256_block_neon(u32 *digest, const void *data,
				  unsigned int num_blks);

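/*
 * Scalar code paths: these use the generic assembly routine only and never
 * touch the NEON register file, so they also serve as the fallback when
 * kernel-mode NEON is unavailable.
 */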
static int sha256_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	return sha256_base_do_update(desc, data, len,
				(sha256_block_fn *)sha256_block_data_order);
}

static int sha256_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	if (len)
		sha256_base_do_update(desc, data, len,
				(sha256_block_fn *)sha256_block_data_order);
	sha256_base_do_finalize(desc,
				(sha256_block_fn *)sha256_block_data_order);

	return sha256_base_finish(desc, out);
}

static int sha256_final(struct shash_desc *desc, u8 *out)
{
	return sha256_finup(desc, NULL, 0, out);
}

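/* Scalar implementations of SHA-256 and SHA-224, registered unconditionally. */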
static struct shash_alg algs[] = { {
	.digestsize		= SHA256_DIGEST_SIZE,
	.init			= sha256_base_init,
	.update			= sha256_update,
	.final			= sha256_final,
	.finup			= sha256_finup,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha256",
	.base.cra_driver_name	= "sha256-arm64",
	.base.cra_priority	= 125,
	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
}, {
	.digestsize		= SHA224_DIGEST_SIZE,
	.init			= sha224_base_init,
	.update			= sha256_update,
	.final			= sha256_final,
	.finup			= sha256_finup,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha224",
	.base.cra_driver_name	= "sha224-arm64",
	.base.cra_priority	= 125,
	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
} };

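/*
 * NEON code paths.  Kernel-mode NEON may only be used when the SIMD
 * registers are available, so fall back to the scalar transform when
 * may_use_simd() says otherwise (e.g. when called from interrupt context).
 */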
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	if (!may_use_simd())
		return sha256_base_do_update(desc, data, len,
				(sha256_block_fn *)sha256_block_data_order);

	while (len > 0) {
		unsigned int chunk = len;

		/*
		 * Don't hog the CPU for the entire time it takes to process all
		 * input when running on a preemptible kernel, but process the
		 * data block by block instead.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT) &&
		    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
			chunk = SHA256_BLOCK_SIZE -
				sctx->count % SHA256_BLOCK_SIZE;

		kernel_neon_begin();
		sha256_base_do_update(desc, data, chunk,
				      (sha256_block_fn *)sha256_block_neon);
		kernel_neon_end();
		data += chunk;
		len -= chunk;
	}
	return 0;
}

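/*
 * As above: finalize under kernel-mode NEON when the SIMD registers are
 * available, otherwise use the scalar transform throughout.
 */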
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	if (!may_use_simd()) {
		if (len)
			sha256_base_do_update(desc, data, len,
				(sha256_block_fn *)sha256_block_data_order);
		sha256_base_do_finalize(desc,
				(sha256_block_fn *)sha256_block_data_order);
	} else {
		if (len)
			sha256_update_neon(desc, data, len);
		kernel_neon_begin();
		sha256_base_do_finalize(desc,
				(sha256_block_fn *)sha256_block_neon);
		kernel_neon_end();
	}
	return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
	return sha256_finup_neon(desc, NULL, 0, out);
}

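/*
 * NEON-accelerated implementations, registered only when the CPU advertises
 * ASIMD support.  The higher cra_priority (150 vs 125) makes the crypto API
 * prefer them over the scalar versions above.
 */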
static struct shash_alg neon_algs[] = { {
	.digestsize		= SHA256_DIGEST_SIZE,
	.init			= sha256_base_init,
	.update			= sha256_update_neon,
	.final			= sha256_final_neon,
	.finup			= sha256_finup_neon,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha256",
	.base.cra_driver_name	= "sha256-arm64-neon",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
}, {
	.digestsize		= SHA224_DIGEST_SIZE,
	.init			= sha224_base_init,
	.update			= sha256_update_neon,
	.final			= sha256_final_neon,
	.finup			= sha256_finup_neon,
	.descsize		= sizeof(struct sha256_state),
	.base.cra_name		= "sha224",
	.base.cra_driver_name	= "sha224-arm64-neon",
	.base.cra_priority	= 150,
	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,
} };

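/*
 * Register the scalar algorithms first, then the NEON ones when ASIMD is
 * present; if the latter fails, unwind the scalar registration as well
 * rather than leaving a partial set behind.
 */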
static int __init sha256_mod_init(void)
{
	int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
	if (ret)
		return ret;

	if (elf_hwcap & HWCAP_ASIMD) {
		ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
		if (ret)
			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
	}
	return ret;
}

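/* Unregister in the reverse order of registration. */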
static void __exit sha256_mod_fini(void)
{
	if (elf_hwcap & HWCAP_ASIMD)
		crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);
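
/*
 * Illustrative only (not part of this driver): a kernel user would normally
 * reach these algorithms through the synchronous hash (shash) API, letting
 * the crypto core pick the highest-priority "sha256" implementation that
 * was registered above.  Roughly, with error handling abbreviated and
 * data/len standing in for a caller-supplied buffer:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		crypto_shash_digest(desc, data, len, digest);
 *		crypto_free_shash(tfm);
 *	}
 */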