/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN     16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
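
/*
 * The crypto API only guarantees CRYPTO_MINALIGN alignment for a tfm
 * context, while the AES-NI asm wants 16-byte alignment, so the context
 * sizes above reserve AESNI_ALIGN_EXTRA bytes of slack for realigning by
 * hand: e.g. if CRYPTO_MINALIGN were 8, the macro works out to
 * (16 - 1) & ~7 == 8 extra bytes, enough to round any 8-byte-aligned
 * pointer up to a 16-byte boundary (see aes_ctx() below).
 */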

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is per-"session" data and needs to be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
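
/*
 * The XTS sub-keys are kept as raw byte arrays rather than as
 * struct crypto_aes_ctx: the context memory handed out by the crypto API
 * is only CRYPTO_MINALIGN-aligned, so users realign through aes_ctx()
 * before passing either half to the asm routines.
 */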

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
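
/*
 * Laid out as bytes, the pre-counter block j0 described above is:
 *   iv[0..3]   salt from the RFC4106 key (ctx->nonce)
 *   iv[4..11]  per-request IV from the ESP payload
 *   iv[12..15] the big-endian 32-bit constant 0x00000001
 * See the IV construction in helper_rfc4106_encrypt()/_decrypt() below.
 */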

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
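
/*
 * In both alignment helpers above, the explicit rounding is a no-op
 * (align == 1) whenever the crypto API already guarantees at least
 * AESNI_ALIGN-aligned contexts; otherwise the pointer is rounded up into
 * the AESNI_ALIGN_EXTRA slack reserved by the *_CTX_SIZE macros.
 */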

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

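        /*
         * aesni_set_key() runs on the SIMD unit, so it may only be used
         * when the kernel can safely touch FPU/XMM state; otherwise fall
         * back to the generic C key expansion.
         */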
        if (!irq_fpu_usable()) {
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        } else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable()) {
                crypto_aes_encrypt_x86(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable()) {
                crypto_aes_decrypt_x86(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        return aes_set_key_common(crypto_skcipher_tfm(tfm),
                                  crypto_skcipher_ctx(tfm), key, len);
}

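/*
 * The ECB/CBC handlers below share one pattern: walk the request, hand
 * each run of complete AES blocks (nbytes & AES_BLOCK_MASK) to the asm
 * routine, and report the sub-block remainder back to
 * skcipher_walk_done() so the walk resumes at the next scatterlist entry.
 */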
static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
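/*
 * Handle the final, possibly partial block of a CTR request: encrypt the
 * current counter block to get one keystream block, XOR only the
 * remaining nbytes of input against it, and advance the counter.
 */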
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * Based on the key length, dispatch to the by8 variant of CTR
         * mode encryption/decryption for improved performance.
         * aes_set_key_common() ensures that the key length is one of
         * {128,192,256} bits.
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();

        return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
                                 key, keylen);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
                                  key + keylen, keylen);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

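/*
 * The glue_helper dispatch tables below are walked in order: the 8-block
 * asm version handles the bulk of the data and the single-block variant
 * mops up the tail. fpu_blocks_limit == 1 means even a single block is
 * considered worth a kernel_fpu_begin()/end() section.
 */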
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};

static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_enc_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_dec_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

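/*
 * "rfc4106(gcm(aes))" is exposed twice: the helper_* routines below are
 * the synchronous CRYPTO_ALG_INTERNAL implementation, and rfc4106_init()
 * wraps that in a cryptd instance so that requests arriving while the FPU
 * is unusable can be deferred to process context.
 */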
static int rfc4106_init(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}

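/*
 * Per the GCM specification, the hash subkey H is the cipher applied to
 * the all-zero block: H = E_K(0^128). That is exactly what the
 * memset + crypto_cipher_encrypt_one() pair below computes.
 */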
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_cipher;

        /*
         * Clear the data in the hash sub key container to zero.
         * We want to cipher all zeros to create the hash sub key.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
        crypto_free_cipher(tfm);
        return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, we need the AAD length to be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4 byte nonce, 8 byte per-request IV, counter of 1. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

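        /*
         * req->assoclen (16 or 20 bytes) covers the AAD proper plus the
         * trailing 8-byte IV, so assoclen - 8 (8 or 12 bytes) is what GCM
         * actually authenticates as AAD.
         */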
        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /*
         * The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet.
         */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, we need the AAD length to be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV: 4 byte nonce, 8 byte per-request IV, counter of 1. */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}

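/*
 * Fast path: when the FPU is usable, call the synchronous inner algorithm
 * directly, unless we are in atomic context while cryptd still has queued
 * requests (those must complete first to preserve ordering). Otherwise
 * route the request through cryptd for processing in process context.
 */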
static int rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes",
        .cra_driver_name        = "__aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
} };

static struct skcipher_alg aesni_skciphers[] = {
        {
                .base = {
                        .cra_name               = "__ecb(aes)",
                        .cra_driver_name        = "__ecb-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ecb_encrypt,
                .decrypt        = ecb_decrypt,
        }, {
                .base = {
                        .cra_name               = "__cbc(aes)",
                        .cra_driver_name        = "__cbc-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = cbc_encrypt,
                .decrypt        = cbc_decrypt,
#ifdef CONFIG_X86_64
        }, {
                .base = {
                        .cra_name               = "__ctr(aes)",
                        .cra_driver_name        = "__ctr-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = 1,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .chunksize      = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ctr_crypt,
                .decrypt        = ctr_crypt,
        }, {
                .base = {
                        .cra_name               = "__xts(aes)",
                        .cra_driver_name        = "__xts-aes-aesni",
                        .cra_priority           = 401,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = XTS_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = xts_aesni_setkey,
                .encrypt        = xts_encrypt,
                .decrypt        = xts_decrypt,
#endif
        }
};

struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

struct {
        const char *algname;
        const char *drvname;
        const char *basename;
        struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
        {
                .algname        = "pcbc(aes)",
                .drvname        = "pcbc-aes-aesni",
                .basename       = "fpu(pcbc(__aes-aesni))",
        },
#endif
};

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
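
/*
 * The device table lets udev/modprobe autoload this module on CPUs that
 * advertise the AES feature flag; aesni_init() re-checks the same flag
 * and returns -ENODEV everywhere else.
 */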

static void aesni_free_simds(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
                    aesni_simd_skciphers2[i].simd; i++)
                simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

static int __init aesni_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* Optimize performance of the CTR mode encryption transform. */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_skciphers(aesni_skciphers,
                                        ARRAY_SIZE(aesni_skciphers));
        if (err)
                goto unregister_algs;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_skciphers;

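        /*
         * The registered skciphers are "__"-prefixed CRYPTO_ALG_INTERNAL
         * implementations; skipping the first two characters of cra_name
         * and cra_driver_name yields the public names under which the SIMD
         * wrappers are registered.
         */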
        for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
                algname = aesni_skciphers[i].base.cra_name + 2;
                drvname = aesni_skciphers[i].base.cra_driver_name + 2;
                basename = aesni_skciphers[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesni_simd_skciphers[i] = simd;
        }

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
                algname = aesni_simd_skciphers2[i].algname;
                drvname = aesni_simd_skciphers2[i].drvname;
                basename = aesni_simd_skciphers2[i].basename;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesni_simd_skciphers2[i].simd = simd;
        }

        return 0;

unregister_simds:
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}

static void __exit aesni_exit(void)
{
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");