/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/* number of data blocks actually fetched for each xcrypt insn */
static unsigned int ecb_fetch_blocks = 2;
static unsigned int cbc_fetch_blocks = 1;

#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
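
/*
 * The defaults above match most PadLock units.  padlock_init() raises
 * ecb_fetch_blocks to 8 (and cbc_fetch_blocks to 4) when a VIA Nano
 * stepping 2 CPU is detected; see the workaround at the bottom of
 * this file.
 */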

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
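
/*
 * Example (see aes_set_key() below): a 128-bit key gives rounds = 10,
 * ksize = 0 and keygen = 0 (the hardware expands the key itself); a
 * 256-bit key gives rounds = 14, ksize = 2 and keygen = 1 (software
 * supplies the pre-expanded schedule).
 */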

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * full 16 * 16 bytes). */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, last_cword);
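
/*
 * last_cword tracks, per CPU, which control word (and thus which key
 * schedule) the PadLock unit used most recently.  padlock_reset_key()
 * forces a key reload only when the next operation uses a different
 * cword, and aes_set_key() clears any stale pointers so a rekeyed
 * context is never mistaken for the cached one.
 */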

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}
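
/*
 * crypto_tfm_ctx() only guarantees crypto_tfm_ctx_alignment(); when
 * that is weaker than PADLOCK_ALIGNMENT, aes_ctx_common() rounds the
 * context address up to the next 16-byte boundary inside the
 * over-allocated context buffer (see cra_ctxsize/cra_alignmask below).
 */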
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
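
	/*
	 * Worked example: key_len == 16 gives rounds = 10, ksize = 0;
	 * key_len == 24 gives rounds = 12, ksize = 1; key_len == 32
	 * gives rounds = 14, ksize = 2, matching the 10/12/14 rounds
	 * AES specifies for 128/192/256-bit keys.
	 */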

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
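
	/*
	 * The key has changed: clear any per-CPU cached cword pointer
	 * that refers to this context, so the next operation on any
	 * CPU forces the hardware to reload the new key.
	 */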
ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
			per_cpu(last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'.  These
 * instructions should be used only inside the irq_ts_save/restore()
 * context.
 */
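
/*
 * The xcrypt instructions are emitted as raw bytes because older
 * assemblers do not know the PadLock mnemonics.  REP XCRYPTECB takes
 * its operands in fixed registers: ESI = source, EDI = destination,
 * EBX = key, EDX = control word, ECX = block count.
 */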
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword, count);
}
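
/*
 * Worked example for the check in aes_crypt() below: with 4 KB pages
 * and ecb_fetch_bytes == 32, a single block starting at page offset
 * 0xff0 would let the engine prefetch past the page boundary
 * (0xff0 + 32 > 0x1000), so it is bounced through the stack buffer
 * in aes_crypt_copy() instead.
 */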
static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		aes_crypt_copy(in, out, key, cword, count);
		return;
	}

	padlock_xcrypt(in, out, key, cword, count);
}
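
/*
 * Requests shorter than the fetch quantum go through aes_crypt(),
 * which copes with a possible page-boundary overrun.  Longer requests
 * are split into a leading remainder (count % ecb_fetch_blocks) and a
 * bulk part whose block count is a multiple of ecb_fetch_blocks; the
 * remainder is always followed by the bulk data, so neither rep can
 * prefetch past the end of the input.
 */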
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		aes_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}
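
/*
 * REP XCRYPTCBC additionally keeps the IV pointer in EAX and leaves
 * it pointing at the IV for the next block when it finishes; the
 * updated pointer is returned so callers can chain consecutive
 * chunks (see cbc_aes_encrypt() below).
 */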
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
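	/*
	 * The blkcipher walk yields one virtually-mapped chunk at a
	 * time; each chunk is processed in whole blocks and any
	 * remainder is handed back to blkcipher_walk_done(), which
	 * advances the walk.
	 */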
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
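		/*
		 * The engine leaves a pointer to the next IV (the last
		 * ciphertext block) in EAX; copy it back into walk.iv
		 * so the next chunk chains correctly.
		 */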
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
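
	/*
	 * VIA Nano stepping 2 prefetches more data per xcrypt than
	 * other PadLock implementations; raising the fetch-block
	 * counts makes the page-boundary check in aes_crypt() account
	 * for the larger prefetch.
	 */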
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = 8;
		cbc_fetch_blocks = 4;	/* NOTE: not used by the CBC path */
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");