// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *              Patrick Steuer <patrick.steuer@de.ibm.com>
 *              Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
                    kma_functions;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        int key_len;
        unsigned long fc;
        union {
                struct crypto_sync_skcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        int key_len;
        unsigned long fc;
        struct crypto_sync_skcipher *fallback;
};

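/*
 * Scatter-gather walk helper for GCM: hands out virtually contiguous
 * chunks of the request; data that straddles scatterlist entries is
 * collected in buf[] so that CPACF KMA always sees whole blocks.
 */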
struct gcm_sg_walk {
        struct scatter_walk walk;
        unsigned int walk_bytes;
        u8 *walk_ptr;
        unsigned int walk_bytes_remain;
        u8 buf[AES_BLOCK_SIZE];
        unsigned int buf_bytes;
        u8 *ptr;
        unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_cip(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc | CPACF_DECRYPT,
                 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                                                 CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
        .cra_name               =       "aes",
        .cra_driver_name        =       "aes-s390",
        .cra_priority           =       300,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_cip,
        .cra_exit               =       fallback_exit_cip,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
                        .cia_max_keysize        =       AES_MAX_KEY_SIZE,
                        .cia_setkey             =       aes_set_key,
                        .cia_encrypt            =       aes_encrypt,
                        .cia_decrypt            =       aes_decrypt,
                }
        }
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
                                         CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);
        return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;

        ret = blkcipher_walk_virt(desc, walk);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(sctx->fc | modifier, sctx->key,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
                                                   CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               =       "ecb(aes)",
        .cra_driver_name        =       "ecb-aes-s390",
        .cra_priority           =       401,    /* combo: aes + ecb + 1 */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .setkey                 =       ecb_aes_set_key,
                        .encrypt                =       ecb_aes_encrypt,
                        .decrypt                =       ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMC_AES_128 :
             (key_len == 24) ? CPACF_KMC_AES_192 :
             (key_len == 32) ? CPACF_KMC_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

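/*
 * CPACF KMC takes the chaining value (IV) and the key in one parameter
 * block and updates the chaining value in place; it is copied back to
 * walk->iv after the loop so the caller sees the final IV.
 */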
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        ret = blkcipher_walk_virt(desc, walk);
        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_kmc(sctx->fc | modifier, &param,
                          walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               =       "cbc(aes)",
        .cra_driver_name        =       "cbc-aes-s390",
        .cra_priority           =       402,    /* ecb-aes-s390 + 1 */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       cbc_aes_set_key,
                        .encrypt                =       cbc_aes_encrypt,
                        .decrypt                =       cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                                   unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
                                         CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
                                                     CRYPTO_TFM_REQ_MASK);

        ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned long fc;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        /* In fips mode only 128 bit or 256 bit keys are valid */
        if (fips_enabled && key_len != 32 && key_len != 64) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* Pick the correct function code based on the key length */
        fc = (key_len == 32) ? CPACF_KM_XTS_128 :
             (key_len == 64) ? CPACF_KM_XTS_256 : 0;

        /* Check if the function code is available */
        xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!xts_ctx->fc)
                return xts_fallback_setkey(tfm, in_key, key_len);

        /* Split the XTS key into the two subkeys */
        key_len = key_len / 2;
        xts_ctx->key_len = key_len;
        memcpy(xts_ctx->key, in_key, key_len);
        memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
        return 0;
}

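/*
 * Compute the initial XTS tweak with CPACF PCC, then run the data blocks
 * through KM. For AES-128-XTS only 16 of the 32 key bytes are used, hence
 * the 16 byte offset into the parameter blocks.
 */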
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int offset, nbytes, n;
        int ret;
        struct {
                u8 key[32];
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        ret = blkcipher_walk_virt(desc, walk);
        offset = xts_ctx->key_len & 0x10;
        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
        cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

        memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
                                                  CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
        .cra_name               =       "xts(aes)",
        .cra_driver_name        =       "xts-aes-s390",
        .cra_priority           =       402,    /* ecb-aes-s390 + 1 */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       xts_fallback_init,
        .cra_exit               =       xts_fallback_exit,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       2 * AES_MIN_KEY_SIZE,
                        .max_keysize            =       2 * AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       xts_aes_set_key,
                        .encrypt                =       xts_aes_encrypt,
                        .decrypt                =       xts_aes_decrypt,
                }
        }
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
             (key_len == 24) ? CPACF_KMCTR_AES_192 :
             (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

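/*
 * Fill the ctrblk page with consecutive counter values starting from iv,
 * so that a single CPACF KMCTR call can process many blocks at once.
 * Returns the number of prepared bytes, a multiple of AES_BLOCK_SIZE and
 * at most PAGE_SIZE.
 */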
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}

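/*
 * The preallocated ctrblk page is shared by all tfms and is therefore
 * protected by ctrblk_lock. If the trylock fails, fall back to processing
 * one counter block at a time using walk->iv directly.
 */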
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        unsigned int n, nbytes;
        int ret, locked;

        locked = mutex_trylock(&ctrblk_lock);

        ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk->iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            walk->dst.virt.addr, walk->src.virt.addr,
                            n, ctrptr);
                if (ctrptr == ctrblk)
                        memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
                               AES_BLOCK_SIZE);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        if (locked)
                mutex_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            buf, walk->src.virt.addr,
                            AES_BLOCK_SIZE, walk->iv);
                memcpy(walk->dst.virt.addr, buf, nbytes);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name               =       "ctr(aes)",
        .cra_driver_name        =       "ctr-aes-s390",
        .cra_priority           =       402,    /* ecb-aes-s390 + 1 */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       1,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       ctr_aes_set_key,
                        .encrypt                =       ctr_aes_encrypt,
                        .decrypt                =       ctr_aes_decrypt,
                }
        }
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->fc = CPACF_KMA_GCM_AES_128;
                break;
        case AES_KEYSIZE_192:
                ctx->fc = CPACF_KMA_GCM_AES_192;
                break;
        case AES_KEYSIZE_256:
                ctx->fc = CPACF_KMA_GCM_AES_256;
                break;
        default:
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->key_len = keylen;
        return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
                           unsigned int len)
{
        memset(gw, 0, sizeof(*gw));
        gw->walk_bytes_remain = len;
        scatterwalk_start(&gw->walk, sg);
}

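/*
 * Map the next non-empty scatterlist entry; returns 0 when the list is
 * exhausted.
 */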
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
        struct scatterlist *nextsg;

        gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
        while (!gw->walk_bytes) {
                nextsg = sg_next(gw->walk.sg);
                if (!nextsg)
                        return 0;
                scatterwalk_start(&gw->walk, nextsg);
                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
                                                   gw->walk_bytes_remain);
        }
        gw->walk_ptr = scatterwalk_map(&gw->walk);
        return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
                                             unsigned int nbytes)
{
        gw->walk_bytes_remain -= nbytes;
        scatterwalk_unmap(&gw->walk);
        scatterwalk_advance(&gw->walk, nbytes);
        scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
        gw->walk_ptr = NULL;
}

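/*
 * Return a pointer (gw->ptr) to contiguous input data, either mapped
 * directly from the scatterlist or collected into gw->buf when the data
 * crosses scatterlist boundaries; tries to provide at least
 * minbytesneeded bytes.
 */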
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
        int n;

        if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
                gw->ptr = gw->buf;
                gw->nbytes = gw->buf_bytes;
                goto out;
        }

        if (gw->walk_bytes_remain == 0) {
                gw->ptr = NULL;
                gw->nbytes = 0;
                goto out;
        }

        if (!_gcm_sg_clamp_and_map(gw)) {
                gw->ptr = NULL;
                gw->nbytes = 0;
                goto out;
        }

        if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
                gw->ptr = gw->walk_ptr;
                gw->nbytes = gw->walk_bytes;
                goto out;
        }

        while (1) {
                n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
                memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
                gw->buf_bytes += n;
                _gcm_sg_unmap_and_advance(gw, n);
                if (gw->buf_bytes >= minbytesneeded) {
                        gw->ptr = gw->buf;
                        gw->nbytes = gw->buf_bytes;
                        goto out;
                }
                if (!_gcm_sg_clamp_and_map(gw)) {
                        gw->ptr = NULL;
                        gw->nbytes = 0;
                        goto out;
                }
        }

out:
        return gw->nbytes;
}

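/*
 * Return a contiguous output area in gw->ptr. If the current scatterlist
 * entry is too small, hand out the local buffer instead; gcm_out_walk_done()
 * then copies the buffered output back to the scatterlist.
 */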
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
        if (gw->walk_bytes_remain == 0) {
                gw->ptr = NULL;
                gw->nbytes = 0;
                goto out;
        }

        if (!_gcm_sg_clamp_and_map(gw)) {
                gw->ptr = NULL;
                gw->nbytes = 0;
                goto out;
        }

        if (gw->walk_bytes >= minbytesneeded) {
                gw->ptr = gw->walk_ptr;
                gw->nbytes = gw->walk_bytes;
                goto out;
        }

        scatterwalk_unmap(&gw->walk);
        gw->walk_ptr = NULL;

        gw->ptr = gw->buf;
        gw->nbytes = sizeof(gw->buf);

out:
        return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
        if (gw->ptr == NULL)
                return 0;

        if (gw->ptr == gw->buf) {
                int n = gw->buf_bytes - bytesdone;
                if (n > 0) {
                        memmove(gw->buf, gw->buf + bytesdone, n);
                        gw->buf_bytes = n;
                } else
                        gw->buf_bytes = 0;
        } else
                _gcm_sg_unmap_and_advance(gw, bytesdone);

        return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
        int i, n;

        if (gw->ptr == NULL)
                return 0;

        if (gw->ptr == gw->buf) {
                for (i = 0; i < bytesdone; i += n) {
                        if (!_gcm_sg_clamp_and_map(gw))
                                return i;
                        n = min(gw->walk_bytes, bytesdone - i);
                        memcpy(gw->walk_ptr, gw->buf + i, n);
                        _gcm_sg_unmap_and_advance(gw, n);
                }
        } else
                _gcm_sg_unmap_and_advance(gw, bytesdone);

        return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int ivsize = crypto_aead_ivsize(tfm);
        unsigned int taglen = crypto_aead_authsize(tfm);
        unsigned int aadlen = req->assoclen;
        unsigned int pclen = req->cryptlen;
        int ret = 0;

        unsigned int n, len, in_bytes, out_bytes,
                     min_bytes, bytes, aad_bytes, pc_bytes;
        struct gcm_sg_walk gw_in, gw_out;
        u8 tag[GHASH_DIGEST_SIZE];

        struct {
                u32 _[3];               /* reserved */
                u32 cv;                 /* Counter Value */
                u8 t[GHASH_DIGEST_SIZE];/* Tag */
                u8 h[AES_BLOCK_SIZE];   /* Hash-subkey */
                u64 taadl;              /* Total AAD Length */
                u64 tpcl;               /* Total Plain-/Cipher-text Length */
                u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
                u8 k[AES_MAX_KEY_SIZE]; /* Key */
        } param;

        /*
         * encrypt
         *   req->src: aad||plaintext
         *   req->dst: aad||ciphertext||tag
         * decrypt
         *   req->src: aad||ciphertext||tag
         *   req->dst: aad||plaintext, return 0 or -EBADMSG
         * aad, plaintext and ciphertext may be empty.
         */
        if (flags & CPACF_DECRYPT)
                pclen -= taglen;
        len = aadlen + pclen;

        memset(&param, 0, sizeof(param));
        param.cv = 1;
        param.taadl = aadlen * 8;
        param.tpcl = pclen * 8;
        memcpy(param.j0, req->iv, ivsize);
        *(u32 *)(param.j0 + ivsize) = 1;
        memcpy(param.k, ctx->key, ctx->key_len);

        gcm_walk_start(&gw_in, req->src, len);
        gcm_walk_start(&gw_out, req->dst, len);

        do {
                min_bytes = min_t(unsigned int,
                                  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
                in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
                out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
                bytes = min(in_bytes, out_bytes);

                if (aadlen + pclen <= bytes) {
                        aad_bytes = aadlen;
                        pc_bytes = pclen;
                        flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
                } else {
                        if (aadlen <= bytes) {
                                aad_bytes = aadlen;
                                pc_bytes = (bytes - aadlen) &
                                           ~(AES_BLOCK_SIZE - 1);
                                flags |= CPACF_KMA_LAAD;
                        } else {
                                aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
                                pc_bytes = 0;
                        }
                }

                if (aad_bytes > 0)
                        memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

                cpacf_kma(ctx->fc | flags, &param,
                          gw_out.ptr + aad_bytes,
                          gw_in.ptr + aad_bytes, pc_bytes,
                          gw_in.ptr, aad_bytes);

                n = aad_bytes + pc_bytes;
                if (gcm_in_walk_done(&gw_in, n) != n)
                        return -ENOMEM;
                if (gcm_out_walk_done(&gw_out, n) != n)
                        return -ENOMEM;
                aadlen -= aad_bytes;
                pclen -= pc_bytes;
        } while (aadlen + pclen > 0);

        if (flags & CPACF_DECRYPT) {
                scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
                if (crypto_memneq(tag, param.t, taglen))
                        ret = -EBADMSG;
        } else
                scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

        memzero_explicit(&param, sizeof(param));
        return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
        return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
        return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
        .setkey                 = gcm_aes_setkey,
        .setauthsize            = gcm_aes_setauthsize,
        .encrypt                = gcm_aes_encrypt,
        .decrypt                = gcm_aes_decrypt,

        .ivsize                 = GHASH_BLOCK_SIZE - sizeof(u32),
        .maxauthsize            = GHASH_DIGEST_SIZE,
        .chunksize              = AES_BLOCK_SIZE,

        .base                   = {
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct s390_aes_ctx),
                .cra_priority           = 900,
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "gcm-aes-s390",
                .cra_module             = THIS_MODULE,
        },
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
        int ret;

        ret = crypto_register_alg(alg);
        if (!ret)
                aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
        return ret;
}

static void aes_s390_fini(void)
{
        while (aes_s390_algs_num--)
                crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
        if (ctrblk)
                free_page((unsigned long) ctrblk);

        if (aes_s390_aead_alg)
                crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC, KMCTR and KMA */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);
        cpacf_query(CPACF_KMA, &kma_functions);

        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
                ret = aes_s390_register_alg(&aes_alg);
                if (ret)
                        goto out_err;
                ret = aes_s390_register_alg(&ecb_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
                ret = aes_s390_register_alg(&cbc_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                ret = aes_s390_register_alg(&xts_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = aes_s390_register_alg(&ctr_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
            cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
            cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
                ret = crypto_register_aead(&gcm_aes_aead);
                if (ret)
                        goto out_err;
                aes_s390_aead_alg = &gcm_aes_aead;
        }

        return 0;
out_err:
        aes_s390_fini();
        return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");