/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

#define AES_KEYLEN_128          1
#define AES_KEYLEN_192          2
#define AES_KEYLEN_256          4

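/*
 * keylen_flag accumulates the AES_KEYLEN_* bits for the key sizes the
 * CPU supports (probed via cpacf_query() in aes_s390_init()); ctrblk is
 * a shared page used to batch counter blocks for the KMCTR instruction
 * and is serialized by ctrblk_lock.
 */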
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        long enc;
        long dec;
        int key_len;
        union {
                struct crypto_skcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

struct pcc_param {
        u8 key[32];
        u8 tweak[16];
        u8 block[16];
        u8 bit[16];
        u8 xts[16];
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        long enc;
        long dec;
        int key_len;
        struct crypto_skcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and software
 * fallback is required, or a negative number if the key size is not
 * valid.
 */
static int need_fallback(unsigned int key_len)
{
        switch (key_len) {
        case 16:
                if (!(keylen_flag & AES_KEYLEN_128))
                        return 1;
                break;
        case 24:
                if (!(keylen_flag & AES_KEYLEN_192))
                        return 1;
                break;
        case 32:
                if (!(keylen_flag & AES_KEYLEN_256))
                        return 1;
                break;
        default:
                return -1;
        }
        return 0;
}

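/*
 * The fallback tfm has to see the same request flags as the outer tfm,
 * and any result flags it sets (e.g. CRYPTO_TFM_RES_BAD_KEY_LEN) have
 * to be propagated back, hence the REQ/RES mask copies around the
 * setkey call.
 */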
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int ret;

        ret = need_fallback(key_len);
        if (ret < 0) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        sctx->key_len = key_len;
        if (!ret) {
                memcpy(sctx->key, in_key, key_len);
                return 0;
        }

        return setkey_fallback_cip(tfm, in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}

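/*
 * Allocate the generic implementation by cra_name; masking with
 * CRYPTO_ALG_NEED_FALLBACK makes sure we do not get an implementation
 * (such as this driver itself) that in turn requires a fallback.
 */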
static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
        .cra_name               =       "aes",
        .cra_driver_name        =       "aes-s390",
        .cra_priority           =       300,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_cip,
        .cra_exit               =       fallback_exit_cip,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
                        .cia_max_keysize        =       AES_MAX_KEY_SIZE,
                        .cia_setkey             =       aes_set_key,
                        .cia_encrypt            =       aes_encrypt,
                        .cia_decrypt            =       aes_decrypt,
                }
        }
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        /* wipe the on-stack request, as in the decrypt path */
        skcipher_request_zero(req);
        return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KM_AES_128_ENC;
                sctx->dec = CPACF_KM_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KM_AES_192_ENC;
                sctx->dec = CPACF_KM_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KM_AES_256_ENC;
                sctx->dec = CPACF_KM_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

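/*
 * Walk the scatterlists in virtually mapped chunks and hand each run of
 * complete AES blocks to the KM instruction; blkcipher_walk_done() is
 * told how many trailing bytes (nbytes & (AES_BLOCK_SIZE - 1)) of the
 * chunk are still unprocessed.
 */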
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
                         struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes;

        while ((nbytes = walk->nbytes)) {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_km(func, param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
                                                   CRYPTO_ALG_ASYNC |
                                                   CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               =       "ecb(aes)",
        .cra_driver_name        =       "ecb-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ecb */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .setkey                 =       ecb_aes_set_key,
                        .encrypt                =       ecb_aes_encrypt,
                        .decrypt                =       ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMC_AES_128_ENC;
                sctx->dec = CPACF_KMC_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMC_AES_192_ENC;
                sctx->dec = CPACF_KMC_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMC_AES_256_ENC;
                sctx->dec = CPACF_KMC_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

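/*
 * KMC takes a parameter block of chaining value (IV) followed by the
 * key and updates the chaining value as it processes the data; the
 * final value is copied back to walk->iv so consecutive calls chain
 * correctly.
 */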
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        if (!nbytes)
                goto out;

        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_kmc(func, &param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               =       "cbc(aes)",
        .cra_driver_name        =       "cbc-aes-s390",
        .cra_priority           =       400,    /* combo: aes + cbc */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       cbc_aes_set_key,
                        .encrypt                =       cbc_aes_encrypt,
                        .decrypt                =       cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
                                                     CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

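/*
 * The CPACF XTS instructions only cover AES-128 and AES-256, i.e.
 * 32- and 64-byte XTS keys; a 48-byte key (XTS-AES-192) is routed
 * entirely to the software fallback.
 */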
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        switch (key_len) {
        case 32:
                xts_ctx->enc = CPACF_KM_XTS_128_ENC;
                xts_ctx->dec = CPACF_KM_XTS_128_DEC;
                memcpy(xts_ctx->key + 16, in_key, 16);
                memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
                break;
        case 48:
                xts_ctx->enc = 0;
                xts_ctx->dec = 0;
                err = xts_fallback_setkey(tfm, in_key, key_len);
                if (err)
                        return err;
                break;
        case 64:
                xts_ctx->enc = CPACF_KM_XTS_256_ENC;
                xts_ctx->dec = CPACF_KM_XTS_256_DEC;
                memcpy(xts_ctx->key, in_key, 32);
                memcpy(xts_ctx->pcc_key, in_key + 32, 32);
                break;
        default:
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }
        xts_ctx->key_len = key_len;
        return 0;
}

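/*
 * XTS runs in two CPACF steps: PCC computes the initial tweak (the
 * "xts" field of the pcc_param block) from the tweak key and IV, then
 * KM processes the data with the data key and that tweak.  The 'offset'
 * computation selects the key within the 32-byte key fields: offset 16
 * for a 32-byte XTS key (AES-128, stored in the upper half), offset 0
 * for a 64-byte XTS key (AES-256).
 */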
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_xts_ctx *xts_ctx,
                         struct blkcipher_walk *walk)
{
        unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        unsigned int n;
        u8 *in, *out;
        struct pcc_param pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        if (!nbytes)
                goto out;

        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
        /* remove decipher modifier bit from 'func' and call PCC */
        ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
        if (ret < 0)
                return -EIO;

        memcpy(xts_param.key, xts_ctx->key, 32);
        memcpy(xts_param.init, pcc_param.xts, 16);
        do {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;

                ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
out:
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
                                                  CRYPTO_ALG_ASYNC |
                                                  CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
        .cra_name               =       "xts(aes)",
        .cra_driver_name        =       "xts-aes-s390",
        .cra_priority           =       400,    /* combo: aes + xts */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       xts_fallback_init,
        .cra_exit               =       xts_fallback_exit,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       2 * AES_MIN_KEY_SIZE,
                        .max_keysize            =       2 * AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       xts_aes_set_key,
                        .encrypt                =       xts_aes_encrypt,
                        .decrypt                =       xts_aes_decrypt,
                }
        }
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMCTR_AES_128_ENC;
                sctx->dec = CPACF_KMCTR_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMCTR_AES_192_ENC;
                sctx->dec = CPACF_KMCTR_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMCTR_AES_256_ENC;
                sctx->dec = CPACF_KMCTR_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}

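/*
 * Pre-compute a run of incrementing counter blocks in the given buffer
 * (the shared ctrblk page) so a single KMCTR invocation can process up
 * to PAGE_SIZE bytes; returns how many bytes worth of counters were
 * set up.
 */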
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
                memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
        }
        return n;
}

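/*
 * Use the shared ctrblk page when its lock is free, so large requests
 * are processed in page-sized batches of counter blocks; otherwise fall
 * back to a single on-stack counter block per KMCTR call rather than
 * blocking on the lock.
 */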
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        unsigned int n, nbytes;
        u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
        u8 *out, *in, *ctrptr = ctrbuf;

        if (!walk->nbytes)
                return ret;

        if (spin_trylock(&ctrblk_lock))
                ctrptr = ctrblk;

        memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= AES_BLOCK_SIZE) {
                        if (ctrptr == ctrblk)
                                n = __ctrblk_init(ctrptr, nbytes);
                        else
                                n = AES_BLOCK_SIZE;
                        ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
                        if (ret < 0 || ret != n) {
                                if (ctrptr == ctrblk)
                                        spin_unlock(&ctrblk_lock);
                                return -EIO;
                        }
                        if (n > AES_BLOCK_SIZE)
                                memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(ctrptr, AES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
        if (ctrptr == ctrblk) {
                if (nbytes)
                        memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
        } else {
                if (!nbytes)
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = cpacf_kmctr(func, sctx->key, buf, in,
                                  AES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != AES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
                crypto_inc(ctrbuf, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
                memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name               =       "ctr(aes)",
        .cra_driver_name        =       "ctr-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ctr */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          =       1,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       ctr_aes_set_key,
                        .encrypt                =       ctr_aes_encrypt,
                        .decrypt                =       ctr_aes_decrypt,
                }
        }
};

static int ctr_aes_alg_reg;

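/*
 * Probe CPACF once at module load: the KM queries determine which AES
 * key sizes the hardware supports at all, while the KM-XTS and KMCTR
 * queries decide whether xts(aes) and ctr(aes) can be registered.
 * Algorithms whose instructions are missing are simply not registered.
 */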
static int __init aes_s390_init(void)
{
        int ret;

        if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
                keylen_flag |= AES_KEYLEN_128;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
                keylen_flag |= AES_KEYLEN_192;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
                keylen_flag |= AES_KEYLEN_256;

        if (!keylen_flag)
                return -EOPNOTSUPP;

        /* z9 109 and z9 BC/EC only support 128 bit key length */
        if (keylen_flag == AES_KEYLEN_128)
                pr_info("AES hardware acceleration is only available for"
                        " 128-bit keys\n");

        ret = crypto_register_alg(&aes_alg);
        if (ret)
                goto aes_err;

        ret = crypto_register_alg(&ecb_aes_alg);
        if (ret)
                goto ecb_aes_err;

        ret = crypto_register_alg(&cbc_aes_alg);
        if (ret)
                goto cbc_aes_err;

        if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
            cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
                ret = crypto_register_alg(&xts_aes_alg);
                if (ret)
                        goto xts_aes_err;
                xts_aes_alg_reg = 1;
        }

        if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto ctr_aes_err;
                }
                ret = crypto_register_alg(&ctr_aes_alg);
                if (ret) {
                        free_page((unsigned long) ctrblk);
                        goto ctr_aes_err;
                }
                ctr_aes_alg_reg = 1;
        }

out:
        return ret;

ctr_aes_err:
        /* xts(aes) may not have been registered if the KM-XTS query failed */
        if (xts_aes_alg_reg)
                crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
        crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        goto out;
}

static void __exit aes_s390_fini(void)
{
        if (ctr_aes_alg_reg) {
                crypto_unregister_alg(&ctr_aes_alg);
                free_page((unsigned long) ctrblk);
        }
        if (xts_aes_alg_reg)
                crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");