arch/x86/crypto/morus640_glue.c
/*
 * The MORUS-640 Authenticated-Encryption Algorithm
 *   Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus640_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>

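/*
 * The full MORUS-640 state: an array of MORUS_STATE_BLOCKS 128-bit blocks
 * that the ISA-specific assembly routines update in place.
 */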
struct morus640_state {
        struct morus640_block s[MORUS_STATE_BLOCKS];
};

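/*
 * Per-request callbacks: which skcipher walk to start (encrypt vs. decrypt)
 * and which SIMD routines to use for full blocks and for the partial tail.
 */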
struct morus640_ops {
        int (*skcipher_walk_init)(struct skcipher_walk *walk,
                                  struct aead_request *req, bool atomic);

        void (*crypt_blocks)(void *state, const void *src, void *dst,
                             unsigned int length);
        void (*crypt_tail)(void *state, const void *src, void *dst,
                           unsigned int length);
};

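/*
 * Feed the associated data to the SIMD 'ad' hook.  Data is pulled from the
 * scatterlist with a scatter_walk; runs of whole MORUS640_BLOCK_SIZE blocks
 * are passed through directly, partial blocks are accumulated in 'buf', and
 * the final partial block is zero-padded before being absorbed.
 */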
static void crypto_morus640_glue_process_ad(
                struct morus640_state *state,
                const struct morus640_glue_ops *ops,
                struct scatterlist *sg_src, unsigned int assoclen)
{
        struct scatter_walk walk;
        struct morus640_block buf;
        unsigned int pos = 0;

        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
                unsigned int size = scatterwalk_clamp(&walk, assoclen);
                unsigned int left = size;
                void *mapped = scatterwalk_map(&walk);
                const u8 *src = (const u8 *)mapped;

                if (pos + size >= MORUS640_BLOCK_SIZE) {
                        if (pos > 0) {
                                unsigned int fill = MORUS640_BLOCK_SIZE - pos;
                                memcpy(buf.bytes + pos, src, fill);
                                ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
                                pos = 0;
                                left -= fill;
                                src += fill;
                        }

                        ops->ad(state, src, left);
                        src += left & ~(MORUS640_BLOCK_SIZE - 1);
                        left &= MORUS640_BLOCK_SIZE - 1;
                }

                memcpy(buf.bytes + pos, src, left);

                pos += left;
                assoclen -= size;
                scatterwalk_unmap(mapped);
                scatterwalk_advance(&walk, size);
                scatterwalk_done(&walk, 0, assoclen);
        }

        if (pos > 0) {
                memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
                ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
        }
}

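/*
 * Encrypt or decrypt the request payload.  The skcipher walk hands back
 * virtually mapped chunks; full blocks go to crypt_blocks() and any
 * remainder smaller than one block goes to crypt_tail().
 */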
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
                                               struct morus640_ops ops,
                                               struct aead_request *req)
{
        struct skcipher_walk walk;
        u8 *cursor_src, *cursor_dst;
        unsigned int chunksize, base;

        ops.skcipher_walk_init(&walk, req, false);

        while (walk.nbytes) {
                cursor_src = walk.src.virt.addr;
                cursor_dst = walk.dst.virt.addr;
                chunksize = walk.nbytes;

                ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);

                base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
                cursor_src += base;
                cursor_dst += base;
                chunksize &= MORUS640_BLOCK_SIZE - 1;

                if (chunksize > 0)
                        ops.crypt_tail(state, cursor_src, cursor_dst,
                                       chunksize);

                skcipher_walk_done(&walk, 0);
        }
}

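/* MORUS-640 takes a single 128-bit key, i.e. exactly one block. */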
int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                unsigned int keylen)
{
        struct morus640_ctx *ctx = crypto_aead_ctx(aead);

        if (keylen != MORUS640_BLOCK_SIZE) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey);

int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize);

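/*
 * Common encryption/decryption path.  The whole operation runs inside one
 * kernel_fpu_begin()/kernel_fpu_end() section so the SIMD registers stay
 * usable across init, AD processing, payload processing and finalization.
 */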
static void crypto_morus640_glue_crypt(struct aead_request *req,
                                       struct morus640_ops ops,
                                       unsigned int cryptlen,
                                       struct morus640_block *tag_xor)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_state state;

        kernel_fpu_begin();

        ctx->ops->init(&state, &ctx->key, req->iv);
        crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
        crypto_morus640_glue_process_crypt(&state, ops, req);
        ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

        kernel_fpu_end();
}

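/* Encrypt the payload and append the authentication tag after the ciphertext. */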
int crypto_morus640_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_encrypt,
                .crypt_blocks = ctx->ops->enc,
                .crypt_tail = ctx->ops->enc_tail,
        };

        struct morus640_block tag = {};
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen;

        crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);

        scatterwalk_map_and_copy(tag.bytes, req->dst,
                                 req->assoclen + cryptlen, authsize, 1);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);

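/*
 * Decrypt and verify.  The expected tag is read from the source scatterlist
 * and passed as tag_xor, so final() XORs the computed tag into it; for an
 * authentic message the result is all zeroes, which is checked in constant
 * time with crypto_memneq().
 */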
int crypto_morus640_glue_decrypt(struct aead_request *req)
{
        static const u8 zeros[MORUS640_BLOCK_SIZE] = {};

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_decrypt,
                .crypt_blocks = ctx->ops->dec,
                .crypt_tail = ctx->ops->dec_tail,
        };

        struct morus640_block tag;
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen - authsize;

        scatterwalk_map_and_copy(tag.bytes, req->src,
                                 req->assoclen + cryptlen, authsize, 0);

        crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);

        return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt);

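/* Called by the ISA-specific glue modules to install their SIMD callbacks. */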
void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
                                   const struct morus640_glue_ops *ops)
{
        struct morus640_ctx *ctx = crypto_aead_ctx(aead);
        ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);

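/*
 * cryptd wrappers.  The SIMD implementation cannot run in contexts where the
 * FPU is unavailable, so the outer algorithm holds a cryptd_aead and either
 * calls the internal implementation directly or defers the request to cryptd.
 * setkey/setauthsize simply forward to the cryptd transform.
 */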
int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                unsigned int keylen)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);

int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
                                     unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);

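/*
 * If the FPU is usable and we are either not in an atomic context or cryptd
 * has nothing queued, call the internal (child) transform directly;
 * otherwise route the request through cryptd so the SIMD code runs later in
 * process context.
 */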
int cryptd_morus640_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        aead = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                aead = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, aead);

        return crypto_aead_encrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);

int cryptd_morus640_glue_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        aead = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                aead = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, aead);

        return crypto_aead_decrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);

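/*
 * Bind the outer algorithm to its internal "__<driver-name>" counterpart by
 * allocating a cryptd AEAD for it and adopting its request size.
 */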
int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
        char internal_name[CRYPTO_MAX_ALG_NAME];

        if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
                        >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);

void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}
EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");