 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 * Copyright (C) 2014-2017 Axis Communications AB

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX 32

#define PDMA_OUT_CFG 0x0000
#define PDMA_OUT_BUF_CFG 0x0004
#define PDMA_OUT_CMD 0x0008
#define PDMA_OUT_DESCRQ_PUSH 0x0010
#define PDMA_OUT_DESCRQ_STAT 0x0014

#define A6_PDMA_IN_CFG 0x0028
#define A6_PDMA_IN_BUF_CFG 0x002c
#define A6_PDMA_IN_CMD 0x0030
#define A6_PDMA_IN_STATQ_PUSH 0x0038
#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
#define A6_PDMA_IN_DESCRQ_STAT 0x0048
#define A6_PDMA_INTR_MASK 0x0068
#define A6_PDMA_ACK_INTR 0x006c
#define A6_PDMA_MASKED_INTR 0x0074

#define A7_PDMA_IN_CFG 0x002c
#define A7_PDMA_IN_BUF_CFG 0x0030
#define A7_PDMA_IN_CMD 0x0034
#define A7_PDMA_IN_STATQ_PUSH 0x003c
#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
#define A7_PDMA_IN_DESCRQ_STAT 0x004C
#define A7_PDMA_INTR_MASK 0x006c
#define A7_PDMA_ACK_INTR 0x0070
#define A7_PDMA_MASKED_INTR 0x0078

#define PDMA_OUT_CFG_EN BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)

#define PDMA_OUT_CMD_START BIT(0)
#define A6_PDMA_OUT_CMD_STOP BIT(3)
#define A7_PDMA_OUT_CMD_STOP BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define PDMA_IN_CFG_EN BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)

#define PDMA_IN_CMD_START BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
#define A6_PDMA_IN_CMD_STOP BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
#define A7_PDMA_IN_CMD_STOP BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)

#define A6_CRY_MD_OPER GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)

#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ BIT(24)

#define A7_CRY_MD_OPER GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)

#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ BIT(16)
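/*
 * Editor's illustration (not part of the original source): the 4-byte
 * metadata word is composed from the fields above with FIELD_PREP().
 * For example, an AES-CBC decrypt with a 128-bit key on ARTPEC-6 would
 * use roughly:
 *
 *   md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
 *        FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
 *        A6_CRY_MD_CIPHER_DECR;
 *
 * i.e. the operation selector in bits 19:16, the key-length code in bits
 * 21:20 and the decrypt flag in bit 22. The ARTPEC-7 variant uses the
 * same scheme with the A7_* field positions.
 */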
/* DMA metadata constants */
#define regk_crypto_aes_cbc 0x00000002
#define regk_crypto_aes_ctr 0x00000003
#define regk_crypto_aes_ecb 0x00000001
#define regk_crypto_aes_gcm 0x00000004
#define regk_crypto_aes_xts 0x00000005
#define regk_crypto_cache 0x00000002
#define a6_regk_crypto_dlkey 0x0000000a
#define a7_regk_crypto_dlkey 0x0000000e
#define regk_crypto_ext 0x00000001
#define regk_crypto_hmac_sha1 0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_hmac_sha384 0x0000000b
#define regk_crypto_hmac_sha512 0x0000000d
#define regk_crypto_init 0x00000000
#define regk_crypto_key_128 0x00000000
#define regk_crypto_key_192 0x00000001
#define regk_crypto_key_256 0x00000002
#define regk_crypto_null 0x00000000
#define regk_crypto_sha1 0x00000006
#define regk_crypto_sha256 0x00000008
#define regk_crypto_sha384 0x0000000a
#define regk_crypto_sha512 0x0000000c
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT 64
#define MODULE_NAME "Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1 1
#define ARTPEC6_CRYPTO_HASH_SHA256 2
#define ARTPEC6_CRYPTO_HASH_SHA384 3
#define ARTPEC6_CRYPTO_HASH_SHA512 4

#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  | ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *
 *   __|__   +-------++-------++-------+   +----+
 *  | MD  |  |Payload||Payload||Payload|   | MD |
 *  +-----+  +-------++-------++-------+   +----+
 */
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA512_BLOCK_SIZE];
	char partial_buffer_out[SHA512_BLOCK_SIZE];
	char key_buffer[SHA512_BLOCK_SIZE];
	char pad_buffer[SHA512_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA512_BLOCK_SIZE];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA512_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
} __packed;

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

static struct dentry *dbgfs_root;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
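	/*
	 * Editor's note (not part of the original source): the push words
	 * encode the 64-byte-aligned descriptor array address in bits 31:6
	 * and the descriptor count minus one in bits 5:0. With, say,
	 * in_cnt = 3 and in_dma_addr = 0x10000040, FIELD_PREP() yields
	 * ind = ((0x10000040 >> 6) << 6) | (3 - 1) = 0x10000042, i.e. the
	 * aligned address with the length packed into its low bits.
	 */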
	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *	physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;

	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);

	return 0;
}
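/*
 * Editor's note (not part of the original source): a short descriptor
 * carries up to 7 bytes inline in the descriptor itself, so no DMA
 * mapping is needed for tiny transfers. The 4-byte metadata words are
 * the typical users: e.g. a cipher_md of 0x00020000 (AES-CBC in the
 * A6_CRY_MD_OPER field) travels as short_descr = 1, short_len = 4 and
 * shrt.data[0..3] holding the metadata bytes.
 */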
static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be visible to the device.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat + dma->in_cnt - 1,
				sizeof(dma->stat[0]),
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *	physical address
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;

	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
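/*
 * Editor's note (not part of the original source): with a 32-byte cache
 * line, an IN chunk starting at e.g. physical address 0x1004 is split so
 * that the 28 bytes up to the next line boundary (ALIGN(0x1004, 32) =
 * 0x1020) go through a bounce buffer, and only whole cache lines are
 * DMA-mapped in place. Bounced data is copied back to the scatterlist by
 * artpec6_crypto_copy_bounce_buffers() when the request completes.
 */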
static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
		       MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *	in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
		       MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;

	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	dst[0] = 0x80;
	memset(dst + 1, 0, pad_bytes);

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
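/*
 * Editor's worked example (not part of the original source), assuming the
 * standard FIPS 180-4 padding targets above: for SHA-256 (mod = 64,
 * size_bytes = 8, target = 55 after the decrement) and dgstlen = 3,
 * diff = 3 and pad_bytes = 55 - 3 = 52, so the pad is 0x80, 52 zero bytes
 * and an 8-byte big-endian bit count: 61 bytes in total, bringing the
 * message to a full 64-byte block (3 + 61 = 64). SHA-384/512 work the
 * same way with mod = 128 and a 16-byte length field.
 */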
static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
					     ctx->key_length);
		if (ret)
			return ret;

		{
			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
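/*
 * Editor's note (not part of the original source): the overflow test
 * relies on 32-bit wraparound. If the last four IV bytes are
 * ff ff ff fe, counter = 0xfffffffe; encrypting three blocks gives
 * counter + nblks = 0x1 after truncation, which is smaller than the
 * start value, so the request is routed to the software fallback that
 * implements the full 128-bit counter semantics the self-tests expect.
 */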
static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
		SHA512_DIGEST_SIZE : digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		bool needtrim = contextsize != digestsize;
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						       req_ctx->pad_buffer,
						       hash_pad_len, false,
						       true);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      !needtrim);
		if (error)
			return error;

		if (needtrim) {
			/* Discard the extra context bytes for SHA-384 */
			error = artpec6_crypto_setup_in_descr(common,
					req_ctx->partial_buffer,
					digestsize - contextsize, true);
			if (error)
				return error;
		}

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_check_key(&cipher->base, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request
 *
 * @req: The asynch request to process
 *
 * @return 0 if the dma job was successfully prepared
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %d!\n",
		       MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
		       MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= AES_BLOCK_SIZE;

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	crypto_inc(req_ctx->hw_ctx.J0+12, 4);
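	/*
	 * Editor's note (not part of the original source): for the usual
	 * 12-byte GCM IV this builds J0 = IV || 0x00000001. memcpy() fills
	 * J0[0..11] and leaves the counter bytes zero, so the crypto_inc()
	 * of the last four bytes produces exactly the initial counter value
	 * of 1 that the hardware does not add by itself.
	 */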
	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
	if (ret)
		return ret;

	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     crypto_pad,
							     false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= AES_BLOCK_SIZE;

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the ciphertext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later compare against the input tag.
		 */
		count = AES_BLOCK_SIZE;

		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, count, false);
			if (ret)
				return ret;
		} else {
			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								count);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
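/*
 * Editor's worked example (not part of the original source): the GCM
 * engine consumes AAD and text in whole 16-byte blocks. For, say,
 * assoclen = 20 and cryptlen = 30 on encryption, the OUT list carries
 * 20 AAD bytes + 12 zero-pad bytes, then 30 data bytes + 2 pad bytes;
 * the IN list skips the AAD in the destination, collects 30 output
 * bytes + 2 pad bytes and finally the 16-byte tag (written after the
 * ciphertext on encryption, or into decryption_tag for the later
 * compare on decryption).
 */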
2050 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
2052 struct artpec6_crypto_req_common *req;
2054 while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2055 req = list_first_entry(&ac->queue,
2056 struct artpec6_crypto_req_common,
2058 list_move_tail(&req->list, &ac->pending);
2059 artpec6_crypto_start_dma(req);
2061 req->req->complete(req->req, -EINPROGRESS);
2065 * In some cases, the hardware can raise an in_eop_flush interrupt
2066 * before actually updating the status, so we have an timer which will
2067 * recheck the status on timeout. Since the cases are expected to be
2068 * very rare, we use a relatively large timeout value. There should be
2069 * no noticeable negative effect if we timeout spuriously.
2071 if (ac->pending_count)
2072 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2074 del_timer(&ac->timer);
2077 static void artpec6_crypto_timeout(unsigned long data)
2079 struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
2081 dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2083 tasklet_schedule(&ac->task);
2086 static void artpec6_crypto_task(unsigned long data)
2088 struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2089 struct artpec6_crypto_req_common *req;
2090 struct artpec6_crypto_req_common *n;
2092 if (list_empty(&ac->pending)) {
2093 pr_debug("Spurious IRQ\n");
2097 spin_lock_bh(&ac->queue_lock);
2099 list_for_each_entry_safe(req, n, &ac->pending, list) {
2100 struct artpec6_crypto_dma_descriptors *dma = req->dma;
2103 dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
2104 sizeof(dma->stat[0]),
2107 stat = req->dma->stat[req->dma->in_cnt-1];
2109 /* A non-zero final status descriptor indicates
2110 * this job has finished.
2112 pr_debug("Request %p status is %X\n", req, stat);
2116 /* Allow testing of timeout handling with fault injection */
2117 #ifdef CONFIG_FAULT_INJECTION
2118 if (should_fail(&artpec6_crypto_fail_status_read, 1))
2122 pr_debug("Completing request %p\n", req);
2124 list_del(&req->list);
2126 artpec6_crypto_dma_unmap_all(req);
2127 artpec6_crypto_copy_bounce_buffers(req);
2129 ac->pending_count--;
2130 artpec6_crypto_common_destroy(req);
2131 req->complete(req->req);
2134 artpec6_crypto_process_queue(ac);
2136 spin_unlock_bh(&ac->queue_lock);
2139 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2141 req->complete(req, 0);
2145 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2147 struct skcipher_request *cipher_req = container_of(req,
2148 struct skcipher_request, base);
2150 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2151 cipher_req->cryptlen - AES_BLOCK_SIZE,
2153 req->complete(req, 0);
2157 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2159 struct skcipher_request *cipher_req = container_of(req,
2160 struct skcipher_request, base);
2162 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2163 cipher_req->cryptlen - AES_BLOCK_SIZE,
2165 req->complete(req, 0);
2168 static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2172 /* Verify GCM hashtag. */
2173 struct aead_request *areq = container_of(req,
2174 struct aead_request, base);
2175 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2177 if (req_ctx->decrypt) {
2178 u8 input_tag[AES_BLOCK_SIZE];
2180 sg_pcopy_to_buffer(areq->src,
2181 sg_nents(areq->src),
2184 areq->assoclen + areq->cryptlen -
2187 if (memcmp(req_ctx->decryption_tag,
2190 pr_debug("***EBADMSG:\n");
2191 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2192 input_tag, AES_BLOCK_SIZE, true);
2193 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2194 req_ctx->decryption_tag,
2195 AES_BLOCK_SIZE, true);
2201 req->complete(req, result);
2204 static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2206 req->complete(req, 0);
2210 /*------------------- Hash functions -----------------------------------------*/
2212 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2213 const u8 *key, unsigned int keylen)
2215 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2220 pr_err("Invalid length (%d) of HMAC key\n",
2225 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2227 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2229 if (keylen > blocksize) {
2230 SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
2232 hdesc->tfm = tfm_ctx->child_hash;
2233 hdesc->flags = crypto_ahash_get_flags(tfm) &
2234 CRYPTO_TFM_REQ_MAY_SLEEP;
2236 tfm_ctx->hmac_key_length = blocksize;
2237 ret = crypto_shash_digest(hdesc, key, keylen,
2243 memcpy(tfm_ctx->hmac_key, key, keylen);
2244 tfm_ctx->hmac_key_length = keylen;
2251 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2253 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2254 enum artpec6_crypto_variant variant = ac->variant;
2255 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2258 memset(req_ctx, 0, sizeof(*req_ctx));
2260 req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2262 req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2265 case ARTPEC6_CRYPTO_HASH_SHA1:
2266 oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2268 case ARTPEC6_CRYPTO_HASH_SHA256:
2269 oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2271 case ARTPEC6_CRYPTO_HASH_SHA384:
2272 oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
2274 case ARTPEC6_CRYPTO_HASH_SHA512:
2275 oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
2279 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2283 if (variant == ARTPEC6_CRYPTO)
2284 req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2286 req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2291 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2293 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2296 if (!req_ctx->common.dma) {
2297 ret = artpec6_crypto_common_init(&req_ctx->common,
2299 artpec6_crypto_complete_hash,
2306 ret = artpec6_crypto_prepare_hash(req);
2308 case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2309 ret = artpec6_crypto_submit(&req_ctx->common);
2312 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2317 artpec6_crypto_common_destroy(&req_ctx->common);
2324 static int artpec6_crypto_hash_final(struct ahash_request *req)
2326 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2328 req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2330 return artpec6_crypto_prepare_submit_hash(req);
2333 static int artpec6_crypto_hash_update(struct ahash_request *req)
2335 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2337 req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2339 return artpec6_crypto_prepare_submit_hash(req);
static int artpec6_crypto_sha1_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
}

static int artpec6_crypto_sha1_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
}

static int artpec6_crypto_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
}

static int __maybe_unused
artpec6_crypto_sha384_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha512_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
}

static int artpec6_crypto_sha512_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}

static int __maybe_unused
artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
}

static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
}

static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int __maybe_unused
artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
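/*
 * Note that the digest() entry points above are simply init() plus a
 * full update/finalize pass expressed as request flags, matching the
 * one-shot semantics the crypto API expects from .digest.
 */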
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
					    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}

static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}

static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}

static int __maybe_unused
artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha384");
}

static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha512");
}
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}
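/*
 * export/import serialize the in-progress hash state (digest count,
 * partial block and intermediate digest) so a request can be suspended
 * and later resumed, possibly on another transform. A caller would pair
 * them roughly like this (illustrative sketch only, "state" sized by
 * crypto_ahash_statesize()):
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req2, state);
 */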
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
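/*
 * Carving up the PDMA elastic memory: with the values chosen below the
 * OUT channel uses 16*64 + 15*64 = 1024 + 960 = 1984 bytes and the IN
 * channel uses (8 + 4 + 4)*64 = 1024 bytes, i.e. both pools are filled
 * to capacity, which the BUILD_BUG_ON_MSG() checks enforce at compile
 * time.
 */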
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64-byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
			  + out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
			  + in_descr_buf_size
			  + in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");
	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}
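/*
 * ARTPEC-6 and ARTPEC-7 expose the same PDMA programming model but at
 * shifted register offsets and interrupt bit positions, so the handler
 * below resolves the variant once and then runs a single common path.
 */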
static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means all data was sent to memory; we then
	 * request a status flush command to write the per-job status to its
	 * status vector. This ensures that the tasklet can detect exactly
	 * how many of the submitted jobs have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
/*------------------- Algorithm definitions ----------------------------------*/
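/*
 * All algorithms register with cra_priority 300, above the generic
 * software implementations (typically priority 100), so the crypto API
 * prefers the hardware variants by default when both are available.
 */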
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
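/*
 * SHA-384/512 and their HMAC variants are only supported by the
 * ARTPEC-7 crypto block; probe() registers the table below only when
 * the variant is not ARTPEC6_CRYPTO.
 */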
static struct ahash_alg artpec7_hash_algos[] = {
	/* SHA-384 */
	{
		.init = artpec6_crypto_sha384_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha384_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "artpec-sha384",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-384 */
	{
		.init = artpec6_crypto_hmac_sha384_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha384_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha384)",
			.cra_driver_name = "artpec-hmac-sha384",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha384,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-512 */
	{
		.init = artpec6_crypto_sha512_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha512_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "artpec-sha512",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-512 */
	{
		.init = artpec6_crypto_hmac_sha512_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha512_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha512)",
			.cra_driver_name = "artpec-hmac-sha512",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha512,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
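/*
 * Illustrative sketch (not part of the driver): a kernel user reaches
 * these implementations through the regular skcipher API, e.g.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * With both this driver and the generic implementation loaded, the
 * higher priority (300) makes "artpec6-cbc-aes" the preferred backend.
 */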
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};
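/*
 * GCM is the only AEAD mode offered; .maxauthsize of AES_BLOCK_SIZE
 * allows the full 16-byte GCM authentication tag.
 */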
static struct aead_alg aead_algos[] = {
	{
		.init = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
	}
};
#ifdef CONFIG_DEBUG_FS

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
	if (!dbgfs_root || IS_ERR(dbgfs_root)) {
		dbgfs_root = NULL;
		pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
		return;
	}

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);
	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	if (!dbgfs_root)
		return;

	debugfs_remove_recursive(dbgfs_root);
	dbgfs_root = NULL;
}
#endif
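/*
 * With CONFIG_FAULT_INJECTION enabled, the two attributes above appear
 * under /sys/kernel/debug/artpec6_crypto/ and use the standard fault
 * injection knobs (probability, interval, times, ...) to exercise the
 * driver's error paths without real hardware failures.
 */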
static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
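/*
 * Probe order: map the registers and interrupt, set up the driver state
 * and DMA descriptor cache, initialise the PDMA hardware, and only then
 * register the algorithm tables. The error unwinding below mirrors the
 * registration order via the goto labels.
 */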
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct resource *res;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64, 0, NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	if (variant != ARTPEC6_CRYPTO) {
		err = crypto_register_ahashes(artpec7_hash_algos,
					      ARRAY_SIZE(artpec7_hash_algos));
		if (err) {
			dev_err(dev, "Failed to register ahashes\n");
			goto unregister_ahashes;
		}
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_a7_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_a7_ahashes:
	if (variant != ARTPEC6_CRYPTO)
		crypto_unregister_ahashes(artpec7_hash_algos,
					  ARRAY_SIZE(artpec7_hash_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}
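/*
 * Teardown order matters in remove(): the algorithms are unregistered
 * first so no new requests can arrive, then the tasklet and timer are
 * quiesced with the interrupt freed in between, and only then is the
 * hardware stopped and the descriptor cache destroyed.
 */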
static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (ac->variant != ARTPEC6_CRYPTO)
		crypto_unregister_ahashes(artpec7_hash_algos,
					  ARRAY_SIZE(artpec7_hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}
static struct platform_driver artpec6_crypto_driver = {
	.probe   = artpec6_crypto_probe,
	.remove  = artpec6_crypto_remove,
	.driver  = {
		.name  = "artpec6-crypto",
		.owner = THIS_MODULE,
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);
MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");