/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE        16
#define GHASH_DIGEST_SIZE       16
#define GCM_IV_SIZE             12

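/*
 * GHASH consumes 16-byte blocks and produces a 16-byte digest. GCM as
 * used here takes a 96-bit IV; the remaining four bytes of the counter
 * block hold a 32-bit big-endian block counter, which is 1 for the
 * block that masks the tag and 2 for the first block of payload.
 */
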
struct ghash_key {
        u64                     h[2];
        u64                     h2[2];
        u64                     h3[2];
        u64                     h4[2];

        be128                   k;
};

struct ghash_desc_ctx {
        u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
        u8 buf[GHASH_BLOCK_SIZE];
        u32 count;
};

struct gcm_aes_ctx {
        struct crypto_aes_ctx   aes_key;
        struct ghash_key        ghash_key;
};

asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
                                       struct ghash_key const *k,
                                       const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
                                      struct ghash_key const *k,
                                      const char *head);

asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], struct ghash_key const *k,
                                  u8 ctr[], u32 const rk[], int rounds,
                                  u8 ks[]);

asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], struct ghash_key const *k,
                                  u8 ctr[], u32 const rk[], int rounds);

asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
                                        u32 const rk[], int rounds);

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static int ghash_init(struct shash_desc *desc)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

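/*
 * Absorb @blocks blocks from @src (preceded by the optional @head
 * block) into the GHASH state @dg. When the NEON unit is usable, this
 * is delegated to the PMULL-based assembler routine passed in as
 * @simd_update; otherwise the generic gf128mul_lle() fallback is used,
 * which only needs the unmodified key in ghash_key::k.
 */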
static void ghash_do_update(int blocks, u64 dg[], const char *src,
                            struct ghash_key *key, const char *head,
                            void (*simd_update)(int blocks, u64 dg[],
                                                const char *src,
                                                struct ghash_key const *k,
                                                const char *head))
{
        if (likely(may_use_simd())) {
                kernel_neon_begin();
                simd_update(blocks, dg, src, key, head);
                kernel_neon_end();
        } else {
                be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

                do {
                        const u8 *in = src;

                        if (head) {
                                in = head;
                                blocks++;
                                head = NULL;
                        } else {
                                src += GHASH_BLOCK_SIZE;
                        }

                        crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
                        gf128mul_lle(&dst, &key->k);
                } while (--blocks);

                dg[0] = be64_to_cpu(dst.b);
                dg[1] = be64_to_cpu(dst.a);
        }
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS      (SZ_64K / GHASH_BLOCK_SIZE)

static int __ghash_update(struct shash_desc *desc, const u8 *src,
                          unsigned int len,
                          void (*simd_update)(int blocks, u64 dg[],
                                              const char *src,
                                              struct ghash_key const *k,
                                              const char *head))
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        ctx->count += len;

        if ((partial + len) >= GHASH_BLOCK_SIZE) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);
                int blocks;

                if (partial) {
                        int p = GHASH_BLOCK_SIZE - partial;

                        memcpy(ctx->buf + partial, src, p);
                        src += p;
                        len -= p;
                }

                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;

                do {
                        int chunk = min(blocks, MAX_BLOCKS);

                        ghash_do_update(chunk, ctx->digest, src, key,
                                        partial ? ctx->buf : NULL,
                                        simd_update);

                        blocks -= chunk;
                        src += chunk * GHASH_BLOCK_SIZE;
                        partial = 0;
                } while (unlikely(blocks > 0));
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
        return 0;
}

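/*
 * Two instantiations of the update routine: the _p64 variant uses the
 * 64x64-bit PMULL instruction from the Crypto Extensions, while the
 * _p8 variant composes the product from the 8-bit polynomial multiplies
 * that every ARMv8 NEON implementation provides.
 */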
static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
                           unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p8);
}

static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
                            unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p64);
}

static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p8);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p64);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

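/*
 * Convert a hash key (or a power of it) into the pre-shifted,
 * "reflected" form the PMULL routines expect: shift the 128-bit value
 * left by one bit and, if a bit was shifted out, fold the GHASH
 * reduction polynomial (top half 0xc2 << 56) back into the upper word.
 */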
static void ghash_reflect(u64 h[], const be128 *k)
{
        u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;

        h[0] = (be64_to_cpu(k->b) << 1) | carry;
        h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);

        if (carry)
                h[1] ^= 0xc200000000000000UL;
}

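/*
 * Precompute H, H^2, H^3 and H^4 in reflected form: the interleaved
 * PMULL routines fold up to four blocks of input per iteration using
 * these powers of the hash key.
 */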
static int __ghash_setkey(struct ghash_key *key,
                          const u8 *inkey, unsigned int keylen)
{
        be128 h;

        /* needed for the fallback */
        memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

        ghash_reflect(key->h, &key->k);

        h = key->k;
        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h2, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h3, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h4, &h);

        return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *inkey, unsigned int keylen)
{
        struct ghash_key *key = crypto_shash_ctx(tfm);

        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return __ghash_setkey(key, inkey, keylen);
}

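/*
 * Both implementations register under the "ghash" name: ghash-neon at
 * priority 100 as the fallback for cores without the Crypto Extensions,
 * and ghash-ce at priority 200 so that it is preferred wherever 64-bit
 * PMULL is available.
 */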
static struct shash_alg ghash_alg[] = {{
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-neon",
        .base.cra_priority      = 100,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p8,
        .final                  = ghash_final_p8,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}, {
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-ce",
        .base.cra_priority      = 200,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p64,
        .final                  = ghash_final_p64,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
         */
        return 6 + ctx->key_length / 4;
}

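/*
 * GCM derives its hash key H by encrypting an all-zero block under the
 * AES key: expand the key, compute H = E_K(0^128) and hand it to
 * __ghash_setkey() to generate the reflected powers of H.
 */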
static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
                      unsigned int keylen)
{
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
        u8 key[GHASH_BLOCK_SIZE];
        int ret;

        ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
        if (ret) {
                tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
                            num_rounds(&ctx->aes_key));

        return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}

static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12 ... 16:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
                           int *buf_count, struct gcm_aes_ctx *ctx)
{
        if (*buf_count > 0) {
                int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

                memcpy(&buf[*buf_count], src, buf_added);

                *buf_count += buf_added;
                src += buf_added;
                count -= buf_added;
        }

        if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
                int blocks = count / GHASH_BLOCK_SIZE;

                ghash_do_update(blocks, dg, src, &ctx->ghash_key,
                                *buf_count ? buf : NULL,
                                pmull_ghash_update_p64);

                src += blocks * GHASH_BLOCK_SIZE;
                count %= GHASH_BLOCK_SIZE;
                *buf_count = 0;
        }

        if (count > 0) {
                memcpy(buf, src, count);
                *buf_count = count;
        }
}

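/*
 * Fold the associated data into the MAC by walking its scatterlist,
 * buffering partial blocks in @buf; a trailing partial block is
 * zero-padded before it is absorbed, as the GCM spec requires.
 */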
static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        u8 buf[GHASH_BLOCK_SIZE];
        struct scatter_walk walk;
        u32 len = req->assoclen;
        int buf_count = 0;

        scatterwalk_start(&walk, req->src);

        do {
                u32 n = scatterwalk_clamp(&walk, len);
                u8 *p;

                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                p = scatterwalk_map(&walk);

                gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
                len -= n;

                scatterwalk_unmap(p);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
        } while (len);

        if (buf_count) {
                memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
                ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
                                pmull_ghash_update_p64);
        }
}

static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
                      u64 dg[], u8 tag[], int cryptlen)
{
        u8 mac[AES_BLOCK_SIZE];
        be128 lengths;

        lengths.a = cpu_to_be64(req->assoclen * 8);
        lengths.b = cpu_to_be64(cryptlen * 8);

        ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL,
                        pmull_ghash_update_p64);

        put_unaligned_be64(dg[1], mac);
        put_unaligned_be64(dg[0], mac + 8);

        crypto_xor(tag, mac, AES_BLOCK_SIZE);
}

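/*
 * Encrypt path: with the NEON unit usable and at least two blocks of
 * input, the PMULL assembler routine encrypts and MACs two AES blocks
 * per call, with the first couple of keystream blocks computed up
 * front. Otherwise, the scalar AES core generates the CTR keystream one
 * block at a time and ghash_do_update() absorbs the ciphertext.
 */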
static int gcm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct skcipher_walk walk;
        u8 iv[AES_BLOCK_SIZE];
        u8 ks[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_encrypt(&walk, req, false);

        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
                put_unaligned_be32(3, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
                put_unaligned_be32(4, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, &ctx->ghash_key,
                                          iv, rk, nrounds, ks);
                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;

                        do {
                                __aes_arm64_encrypt(ctx->aes_key.key_enc,
                                                    ks, iv, nrounds);
                                crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
                                        walk.dst.virt.addr, &ctx->ghash_key,
                                        NULL, pmull_ghash_update_p64);

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
                if (walk.nbytes) {
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
                                            nrounds);
                        if (walk.nbytes > AES_BLOCK_SIZE) {
                                crypto_inc(iv, AES_BLOCK_SIZE);
                                __aes_arm64_encrypt(ctx->aes_key.key_enc,
                                                    ks + AES_BLOCK_SIZE, iv,
                                                    nrounds);
                        }
                }
        }

        /* handle the tail */
        if (walk.nbytes) {
                u8 buf[GHASH_BLOCK_SIZE];
                unsigned int nbytes = walk.nbytes;
                u8 *dst = walk.dst.virt.addr;
                u8 *head = NULL;

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
                               walk.nbytes);

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = dst;
                        dst += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, dst, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
                                pmull_ghash_update_p64);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen);

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(aead), 1);

        return 0;
}

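/*
 * Decrypt path: the mirror image of gcm_encrypt(), except that GHASH
 * runs over the ciphertext before it is decrypted, and the final tag
 * comparison uses crypto_memneq() to avoid leaking timing information.
 */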
static int gcm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct skcipher_walk walk;
        u8 iv[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u8 buf[2 * GHASH_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_decrypt(&walk, req, false);

        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                        int rem = walk.total - blocks * AES_BLOCK_SIZE;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, &ctx->ghash_key,
                                          iv, rk, nrounds);

                        /* check if this is the final iteration of the loop */
                        if (rem < (2 * AES_BLOCK_SIZE)) {
                                u8 *iv2 = iv + AES_BLOCK_SIZE;

                                if (rem > AES_BLOCK_SIZE) {
                                        memcpy(iv2, iv, AES_BLOCK_SIZE);
                                        crypto_inc(iv2, AES_BLOCK_SIZE);
                                }

                                pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);

                                if (rem > AES_BLOCK_SIZE)
                                        pmull_gcm_encrypt_block(iv2, iv2, NULL,
                                                                nrounds);
                        }

                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;

                        ghash_do_update(blocks, dg, walk.src.virt.addr,
                                        &ctx->ghash_key, NULL,
                                        pmull_ghash_update_p64);

                        do {
                                __aes_arm64_encrypt(ctx->aes_key.key_enc,
                                                    buf, iv, nrounds);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
                if (walk.nbytes) {
                        if (walk.nbytes > AES_BLOCK_SIZE) {
                                u8 *iv2 = iv + AES_BLOCK_SIZE;

                                memcpy(iv2, iv, AES_BLOCK_SIZE);
                                crypto_inc(iv2, AES_BLOCK_SIZE);

                                __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
                                                    iv2, nrounds);
                        }
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
                                            nrounds);
                }
        }

        /* handle the tail */
        if (walk.nbytes) {
                const u8 *src = walk.src.virt.addr;
                const u8 *head = NULL;
                unsigned int nbytes = walk.nbytes;

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = src;
                        src += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, src, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
                                pmull_ghash_update_p64);

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
                               walk.nbytes);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);

        /* compare calculated auth tag with the stored one */
        scatterwalk_map_and_copy(buf, req->src,
                                 req->assoclen + req->cryptlen - authsize,
                                 authsize, 0);

        if (crypto_memneq(tag, buf, authsize))
                return -EBADMSG;
        return 0;
}

static struct aead_alg gcm_aes_alg = {
        .ivsize                 = GCM_IV_SIZE,
        .chunksize              = 2 * AES_BLOCK_SIZE,
        .maxauthsize            = AES_BLOCK_SIZE,
        .setkey                 = gcm_setkey,
        .setauthsize            = gcm_setauthsize,
        .encrypt                = gcm_encrypt,
        .decrypt                = gcm_decrypt,

        .base.cra_name          = "gcm(aes)",
        .base.cra_driver_name   = "gcm-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct gcm_aes_ctx),
        .base.cra_module        = THIS_MODULE,
};

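/*
 * PMULL is a hard requirement for the GCM implementation and for
 * ghash-ce; plain NEON cores only get the ghash-neon fallback, i.e.
 * just the first element of ghash_alg[].
 */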
static int __init ghash_ce_mod_init(void)
{
        int ret;

        if (!(elf_hwcap & HWCAP_ASIMD))
                return -ENODEV;

        if (elf_hwcap & HWCAP_PMULL)
                ret = crypto_register_shashes(ghash_alg,
                                              ARRAY_SIZE(ghash_alg));
        else
                /* only register the first array element */
                ret = crypto_register_shash(ghash_alg);

        if (ret)
                return ret;

        if (elf_hwcap & HWCAP_PMULL) {
                ret = crypto_register_aead(&gcm_aes_alg);
                if (ret)
                        crypto_unregister_shashes(ghash_alg,
                                                  ARRAY_SIZE(ghash_alg));
        }
        return ret;
}

static void __exit ghash_ce_mod_exit(void)
{
        if (elf_hwcap & HWCAP_PMULL)
                crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
        else
                crypto_unregister_shash(ghash_alg);
        crypto_unregister_aead(&gcm_aes_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
        { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);