/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

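/*
 * Walk flags, as used by the functions below:
 *
 * PHYS  - the walk hands out physical pages for async use; writes that
 *         had to be bounced are queued and flushed later by
 *         skcipher_walk_complete().
 * SLOW  - the current chunk cannot be processed in place (it spans
 *         scatterlist entries or is too fragmented), so it is bounced
 *         through a temporary buffer.
 * COPY  - the chunk is processed in a scratch page and copied back to
 *         the destination when the step completes.
 * DIFF  - source and destination are distinct and mapped separately.
 * SLEEP - the walk may sleep, so allocations may use GFP_KERNEL.
 */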
enum {
        SKCIPHER_WALK_PHYS = 1 << 0,
        SKCIPHER_WALK_SLOW = 1 << 1,
        SKCIPHER_WALK_COPY = 1 << 2,
        SKCIPHER_WALK_DIFF = 1 << 3,
        SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

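        /*
         * end_page is the start of the page holding the last byte of
         * [start, start + len).  If the range fits within one page,
         * start >= end_page and the spot stays put; otherwise max()
         * shifts the spot up to the start of that last page.
         */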
        return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
        return 0;
}

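/*
 * Finish or continue a walk step.  @err is the number of bytes the
 * caller left unprocessed (zero when all of walk->nbytes was consumed),
 * or a negative error code, which aborts the walk.
 */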
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n = walk->nbytes - err;
        unsigned int nbytes;

        nbytes = walk->total - n;

        if (unlikely(err < 0)) {
                nbytes = 0;
                n = 0;
        } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                           SKCIPHER_WALK_SLOW |
                                           SKCIPHER_WALK_COPY |
                                           SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (WARN_ON(err)) {
                        err = -EINVAL;
                        nbytes = 0;
                } else
                        n = skcipher_done_slow(walk, n);
        }

        if (err > 0)
                err = 0;

        walk->total = nbytes;
        walk->nbytes = nbytes;

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }

        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

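/*
 * Flush the writes queued up during a physical (async) walk: each
 * buffer on walk->buffers is copied out to its final destination,
 * unless the operation as a whole failed, and then freed.
 */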
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
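                /*
                 * sizeof(*p) ^ (sizeof(*p) - 1) sets every bit up to and
                 * including the lowest set bit of sizeof(*p); shifting
                 * right once yields the mask of the bits below it.  This
                 * clamps the alignment mask to what an offset of
                 * sizeof(*p) from an aligned allocation can guarantee,
                 * since p->buffer starts right after the header.
                 */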
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}

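/*
 * Process the chunk via the scratch page: the source is copied in
 * here, the cipher then works on the copy in place, and the result
 * is copied back out by skcipher_walk_done() (or, for physical
 * walks, by the queued write in skcipher_walk_complete()).
 */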
static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

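/*
 * Fast path: the chunk is processed directly in the source and
 * destination pages, which are mapped separately only when they
 * actually differ.
 */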
static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}

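/*
 * Advance the walk by one chunk: clamp the chunk to what both
 * scatterlists can supply contiguously, then pick the slow, copy or
 * fast path accordingly.
 */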
static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

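/*
 * Bounce a misaligned IV into walk->buffer so that walk->iv always
 * satisfies the algorithm's alignmask; the result is copied back to
 * the caller's IV by skcipher_walk_done()/skcipher_walk_complete().
 */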
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        /* ALIGN() takes the alignment, not the alignmask. */
        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        walk->page = NULL;
        walk->nbytes = walk->total;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->iv = req->iv;
        walk->oiv = req->iv;

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}

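/*
 * A minimal sketch of how a driver typically consumes a virtual walk;
 * my_cipher_blocks() is a hypothetical per-driver helper, and bsize
 * stands for the cipher's block size:
 *
 *      struct skcipher_walk walk;
 *      unsigned int nbytes;
 *      int err;
 *
 *      err = skcipher_walk_virt(&walk, req, false);
 *      while ((nbytes = walk.nbytes) != 0) {
 *              my_cipher_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *                               nbytes - nbytes % bsize);
 *              err = skcipher_walk_done(&walk, nbytes % bsize);
 *      }
 */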
int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

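/*
 * Begin a physical-address walk for asynchronous use.  Writes that
 * had to be bounced are queued on walk->buffers and must be flushed
 * with skcipher_walk_complete() once the operation finishes.
 */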
int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

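/*
 * Shared setup for the AEAD walks: like the skcipher case, except
 * that the associated data (req->assoclen bytes) is skipped in both
 * the source and destination scatterlists before the walk starts.
 */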
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

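/*
 * The legacy blkcipher and ablkcipher types are wrapped rather than
 * converted: the skcipher context then holds only a pointer to the
 * underlying legacy transform, hence the reduced extsize below.
 */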
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type ||
            alg->cra_type == &crypto_givcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);

        return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);

        return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        return 0;
}

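/*
 * Bounce a misaligned key through a freshly allocated buffer so that
 * the algorithm's ->setkey() always sees an address satisfying its
 * alignmask; the bounce buffer is zeroed before being freed.
 */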
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return skcipher_setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
            tfm->__crt_alg->cra_type == &crypto_givcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

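/*
 * A minimal usage sketch (error handling elided, synchronous
 * completion assumed; "cbc(aes)" is just an example algorithm name,
 * and async transforms may instead return -EINPROGRESS):
 *
 *      struct crypto_skcipher *tfm;
 *      struct skcipher_request *req;
 *
 *      tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      crypto_skcipher_setkey(tfm, key, keylen);
 *      req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *      skcipher_request_set_crypt(req, src, dst, len, iv);
 *      crypto_skcipher_encrypt(req);
 *      skcipher_request_free(req);
 *      crypto_free_skcipher(tfm);
 */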
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

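/*
 * Validate the size limits and fill in the defaults: chunksize falls
 * back to the block size and walksize to the chunk size.
 */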
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");