1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * AMCC SoC PPC4xx Crypto Driver
5 * Copyright (c) 2008 Applied Micro Circuits Corporation.
6 * All rights reserved. James Hsiao <jhsiao@amcc.com>
* This file implements the AMCC crypto offload Linux device driver for use with
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/spinlock_types.h>
15 #include <linux/random.h>
16 #include <linux/scatterlist.h>
17 #include <linux/crypto.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/platform_device.h>
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/slab.h>
27 #include <asm/dcr-regs.h>
28 #include <asm/cacheflush.h>
29 #include <crypto/aead.h>
30 #include <crypto/aes.h>
31 #include <crypto/ctr.h>
32 #include <crypto/gcm.h>
33 #include <crypto/sha.h>
34 #include <crypto/rng.h>
35 #include <crypto/scatterwalk.h>
36 #include <crypto/skcipher.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/internal/rng.h>
39 #include <crypto/internal/skcipher.h>
40 #include "crypto4xx_reg_def.h"
41 #include "crypto4xx_core.h"
42 #include "crypto4xx_sa.h"
43 #include "crypto4xx_trng.h"
45 #define PPC4XX_SEC_VERSION_STR "0.5"
48 * PPC4xx Crypto Engine Initialization Routine
50 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
52 union ce_ring_size ring_size;
53 union ce_ring_control ring_ctrl;
54 union ce_part_ring_size part_ring_size;
55 union ce_io_threshold io_threshold;
57 union ce_pe_dma_cfg pe_dma_cfg;
60 writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
/* set up PE DMA: reset SG, PDR and PE, then release the reset */
63 pe_dma_cfg.bf.bo_sgpd_en = 1;
64 pe_dma_cfg.bf.bo_data_en = 0;
65 pe_dma_cfg.bf.bo_sa_en = 1;
66 pe_dma_cfg.bf.bo_pd_en = 1;
67 pe_dma_cfg.bf.dynamic_sa_en = 1;
68 pe_dma_cfg.bf.reset_sg = 1;
69 pe_dma_cfg.bf.reset_pdr = 1;
70 pe_dma_cfg.bf.reset_pe = 1;
71 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
/* take PE, SG and PDR out of reset */
73 pe_dma_cfg.bf.pe_mode = 0;
74 pe_dma_cfg.bf.reset_sg = 0;
75 pe_dma_cfg.bf.reset_pdr = 0;
76 pe_dma_cfg.bf.reset_pe = 0;
77 pe_dma_cfg.bf.bo_td_en = 0;
78 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
79 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
80 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
81 writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
82 get_random_bytes(&rand_num, sizeof(rand_num));
83 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
84 get_random_bytes(&rand_num, sizeof(rand_num));
85 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
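/*
 * Program the ring geometry below: the packet descriptor ring offset and
 * size first, then the ring control word, followed by the gather/scatter
 * ring bases and the partitioned ring sizes.
 */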
87 ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
88 ring_size.bf.ring_size = PPC4XX_NUM_PD;
89 writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
91 writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
92 device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
93 device_ctrl |= PPC4XX_DC_3DES_EN;
94 writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
95 writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
96 writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
98 part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
99 part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
100 writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
101 writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
103 io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
104 io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
105 writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
106 writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
107 writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
108 writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
109 writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
110 writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
111 writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
112 writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
/* enable PE mode, keeping PE, SG and PDR out of reset */
114 pe_dma_cfg.bf.pe_mode = 1;
115 pe_dma_cfg.bf.reset_sg = 0;
116 pe_dma_cfg.bf.reset_pdr = 0;
117 pe_dma_cfg.bf.reset_pe = 0;
118 pe_dma_cfg.bf.bo_td_en = 0;
119 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
/* clear all pending interrupts */
121 writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
122 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
123 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
124 writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
126 writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
127 dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
128 writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
129 dev->ce_base + CRYPTO4XX_INT_EN);
131 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
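/*
 * crypto4xx_alloc_sa - allocate the per-context inbound and outbound SA
 * buffers. @size is given in 32-bit words, hence the kcalloc(size, 4, ...)
 * calls below.
 */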
135 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
137 ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
138 if (ctx->sa_in == NULL)
141 ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
142 if (ctx->sa_out == NULL) {
153 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
* alloc memory for the packet descriptor ring
* no need to alloc buffers for the ring
* the shadow SA and state record pools used by the ring are also allocated here
167 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
170 dev->pdr = dma_alloc_coherent(dev->core_dev->device,
171 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
172 &dev->pdr_pa, GFP_ATOMIC);
176 dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
178 if (!dev->pdr_uinfo) {
179 dma_free_coherent(dev->core_dev->device,
180 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
185 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
186 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
187 sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
188 &dev->shadow_sa_pool_pa,
190 if (!dev->shadow_sa_pool)
193 dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
194 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
195 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
196 if (!dev->shadow_sr_pool)
198 for (i = 0; i < PPC4XX_NUM_PD; i++) {
199 struct ce_pd *pd = &dev->pdr[i];
200 struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
202 pd->sa = dev->shadow_sa_pool_pa +
203 sizeof(union shadow_sa_buf) * i;
/* point to the shadow SA; 256 bytes is enough for any kind of dynamic sa */
206 pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
/* point to the pre-allocated state record */
209 pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
210 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
211 sizeof(struct sa_state_record) * i;
217 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
220 dma_free_coherent(dev->core_dev->device,
221 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
222 dev->pdr, dev->pdr_pa);
224 if (dev->shadow_sa_pool)
225 dma_free_coherent(dev->core_dev->device,
226 sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
227 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
229 if (dev->shadow_sr_pool)
230 dma_free_coherent(dev->core_dev->device,
231 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
232 dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
234 kfree(dev->pdr_uinfo);
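/*
 * Reserve the next free packet descriptor slot. The caller must hold
 * core_dev->lock (hence the _nolock suffix); ERING_WAS_FULL is returned
 * when advancing the head would catch up with the tail.
 */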
237 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
242 retval = dev->pdr_head;
243 tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
245 if (tmp == dev->pdr_tail)
246 return ERING_WAS_FULL;
253 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
255 struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
259 spin_lock_irqsave(&dev->core_dev->lock, flags);
260 pd_uinfo->state = PD_ENTRY_FREE;
262 if (dev->pdr_tail != PPC4XX_LAST_PD)
266 tail = dev->pdr_tail;
267 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
273 * alloc memory for the gather ring
274 * no need to alloc buf for the ring
275 * gdr_tail, gdr_head and gdr_count are initialized by this function
277 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
279 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
280 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
281 &dev->gdr_pa, GFP_ATOMIC);
288 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
290 dma_free_coherent(dev->core_dev->device,
291 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
292 dev->gdr, dev->gdr_pa);
* preemption or interrupts must be disabled when this function is called
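*
* Worked example with hypothetical numbers: if the ring had 256 entries
* with gdr_head = 250, gdr_tail = 10 and n = 8, then tmp = (250 + 8) % 256 = 2;
* in the head > tail branch, 2 < 250 but 2 is not >= 10, so the ring is not
* full and the head advances to 2.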
299 static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
304 if (n >= PPC4XX_NUM_GD)
305 return ERING_WAS_FULL;
307 retval = dev->gdr_head;
308 tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
309 if (dev->gdr_head > dev->gdr_tail) {
310 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
311 return ERING_WAS_FULL;
312 } else if (dev->gdr_head < dev->gdr_tail) {
313 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
314 return ERING_WAS_FULL;
321 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
325 spin_lock_irqsave(&dev->core_dev->lock, flags);
326 if (dev->gdr_tail == dev->gdr_head) {
327 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
331 if (dev->gdr_tail != PPC4XX_LAST_GD)
336 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
341 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
342 dma_addr_t *gd_dma, u32 idx)
344 *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
346 return &dev->gdr[idx];
350 * alloc memory for the scatter ring
351 * need to alloc buf for the ring
352 * sdr_tail, sdr_head and sdr_count are initialized by this function
354 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
358 /* alloc memory for scatter descriptor ring */
359 dev->sdr = dma_alloc_coherent(dev->core_dev->device,
360 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
361 &dev->sdr_pa, GFP_ATOMIC);
365 dev->scatter_buffer_va =
366 dma_alloc_coherent(dev->core_dev->device,
367 PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
368 &dev->scatter_buffer_pa, GFP_ATOMIC);
369 if (!dev->scatter_buffer_va) {
370 dma_free_coherent(dev->core_dev->device,
371 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
372 dev->sdr, dev->sdr_pa);
376 for (i = 0; i < PPC4XX_NUM_SD; i++) {
377 dev->sdr[i].ptr = dev->scatter_buffer_pa +
378 PPC4XX_SD_BUFFER_SIZE * i;
384 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
387 dma_free_coherent(dev->core_dev->device,
388 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
389 dev->sdr, dev->sdr_pa);
391 if (dev->scatter_buffer_va)
392 dma_free_coherent(dev->core_dev->device,
393 PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
394 dev->scatter_buffer_va,
395 dev->scatter_buffer_pa);
* preemption or interrupts must be disabled when this function is called
402 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
407 if (n >= PPC4XX_NUM_SD)
408 return ERING_WAS_FULL;
410 retval = dev->sdr_head;
411 tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
if (dev->sdr_head > dev->sdr_tail) {
413 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
414 return ERING_WAS_FULL;
415 } else if (dev->sdr_head < dev->sdr_tail) {
416 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
417 return ERING_WAS_FULL;
} /* the head == tail, i.e. empty, case is already taken care of */
424 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
428 spin_lock_irqsave(&dev->core_dev->lock, flags);
429 if (dev->sdr_tail == dev->sdr_head) {
430 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
433 if (dev->sdr_tail != PPC4XX_LAST_SD)
437 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
442 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
443 dma_addr_t *sd_dma, u32 idx)
445 *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
447 return &dev->sdr[idx];
450 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
452 struct pd_uinfo *pd_uinfo,
454 struct scatterlist *dst)
456 unsigned int first_sd = pd_uinfo->first_sd;
457 unsigned int last_sd;
458 unsigned int overflow = 0;
459 unsigned int to_copy;
460 unsigned int dst_start = 0;
* Because the scatter buffers are all neatly organized in one
* big contiguous ringbuffer, scatterwalk_map_and_copy() can
* be instructed to copy a range of buffers in one go.
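* If the used range of SD entries wraps past the end of the ring, the
* copy is done in two chunks: first from first_sd up to the last ring
* entry, then from entry 0 up to the wrapped-around "overflow" index.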
468 last_sd = (first_sd + pd_uinfo->num_sd);
469 if (last_sd > PPC4XX_LAST_SD) {
470 last_sd = PPC4XX_LAST_SD;
471 overflow = last_sd % PPC4XX_NUM_SD;
475 void *buf = dev->scatter_buffer_va +
476 first_sd * PPC4XX_SD_BUFFER_SIZE;
478 to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
479 (1 + last_sd - first_sd));
480 scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
486 dst_start += to_copy;
492 static void crypto4xx_copy_digest_to_dst(void *dst,
493 struct pd_uinfo *pd_uinfo,
494 struct crypto4xx_ctx *ctx)
496 struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
498 if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
499 memcpy(dst, pd_uinfo->sr_va->save_digest,
500 SA_HASH_ALG_SHA1_DIGEST_SIZE);
504 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
505 struct pd_uinfo *pd_uinfo)
508 if (pd_uinfo->num_gd) {
509 for (i = 0; i < pd_uinfo->num_gd; i++)
510 crypto4xx_put_gd_to_gdr(dev);
511 pd_uinfo->first_gd = 0xffffffff;
512 pd_uinfo->num_gd = 0;
514 if (pd_uinfo->num_sd) {
515 for (i = 0; i < pd_uinfo->num_sd; i++)
516 crypto4xx_put_sd_to_sdr(dev);
518 pd_uinfo->first_sd = 0xffffffff;
519 pd_uinfo->num_sd = 0;
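/*
 * crypto4xx_cipher_done - completion path for skcipher requests: copy the
 * result out of the scatter buffers (if scatter was used), save the output
 * IV back into the request if requested, return the SG descriptors and
 * complete the request.
 */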
523 static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
524 struct pd_uinfo *pd_uinfo,
527 struct skcipher_request *req;
528 struct scatterlist *dst;
531 req = skcipher_request_cast(pd_uinfo->async_req);
533 if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
534 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
535 req->cryptlen, req->dst);
537 dst = pd_uinfo->dest_va;
538 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
539 dst->offset, dst->length, DMA_FROM_DEVICE);
542 if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
543 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
545 crypto4xx_memcpy_from_le32((u32 *)req->iv,
546 pd_uinfo->sr_va->save_iv,
547 crypto_skcipher_ivsize(skcipher));
550 crypto4xx_ret_sg_desc(dev, pd_uinfo);
552 if (pd_uinfo->state & PD_ENTRY_BUSY)
553 skcipher_request_complete(req, -EINPROGRESS);
554 skcipher_request_complete(req, 0);
557 static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
558 struct pd_uinfo *pd_uinfo)
560 struct crypto4xx_ctx *ctx;
561 struct ahash_request *ahash_req;
563 ahash_req = ahash_request_cast(pd_uinfo->async_req);
564 ctx = crypto_tfm_ctx(ahash_req->base.tfm);
566 crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
567 crypto_tfm_ctx(ahash_req->base.tfm));
568 crypto4xx_ret_sg_desc(dev, pd_uinfo);
570 if (pd_uinfo->state & PD_ENTRY_BUSY)
571 ahash_request_complete(ahash_req, -EINPROGRESS);
572 ahash_request_complete(ahash_req, 0);
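/*
 * crypto4xx_aead_done - completion path for AEAD requests: copy out the
 * payload, then either append the ICV to the destination (encryption) or
 * compare it against the engine's saved digest (decryption), and complete
 * the request with an error on authentication failure or bad status.
 */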
575 static void crypto4xx_aead_done(struct crypto4xx_device *dev,
576 struct pd_uinfo *pd_uinfo,
579 struct aead_request *aead_req = container_of(pd_uinfo->async_req,
580 struct aead_request, base);
581 struct scatterlist *dst = pd_uinfo->dest_va;
582 size_t cp_len = crypto_aead_authsize(
583 crypto_aead_reqtfm(aead_req));
584 u32 icv[AES_BLOCK_SIZE];
587 if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
588 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
589 pd->pd_ctl_len.bf.pkt_len,
592 dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
596 if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
597 /* append icv at the end */
598 crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
601 scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
604 /* check icv at the end */
605 scatterwalk_map_and_copy(icv, aead_req->src,
606 aead_req->assoclen + aead_req->cryptlen -
609 crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
611 if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
615 crypto4xx_ret_sg_desc(dev, pd_uinfo);
617 if (pd->pd_ctl.bf.status & 0xff) {
618 if (!__ratelimit(&dev->aead_ratelimit)) {
619 if (pd->pd_ctl.bf.status & 2)
620 pr_err("pad fail error\n");
621 if (pd->pd_ctl.bf.status & 4)
622 pr_err("seqnum fail\n");
623 if (pd->pd_ctl.bf.status & 8)
pr_err("error notify\n");
625 pr_err("aead return err status = 0x%02x\n",
626 pd->pd_ctl.bf.status & 0xff);
627 pr_err("pd pad_ctl = 0x%08x\n",
628 pd->pd_ctl.bf.pd_pad_ctl);
633 if (pd_uinfo->state & PD_ENTRY_BUSY)
634 aead_request_complete(aead_req, -EINPROGRESS);
636 aead_request_complete(aead_req, err);
639 static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
641 struct ce_pd *pd = &dev->pdr[idx];
642 struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
644 switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
645 case CRYPTO_ALG_TYPE_SKCIPHER:
646 crypto4xx_cipher_done(dev, pd_uinfo, pd);
648 case CRYPTO_ALG_TYPE_AEAD:
649 crypto4xx_aead_done(dev, pd_uinfo, pd);
651 case CRYPTO_ALG_TYPE_AHASH:
652 crypto4xx_ahash_done(dev, pd_uinfo);
657 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
659 crypto4xx_destroy_pdr(core_dev->dev);
660 crypto4xx_destroy_gdr(core_dev->dev);
661 crypto4xx_destroy_sdr(core_dev->dev);
662 iounmap(core_dev->dev->ce_base);
663 kfree(core_dev->dev);
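/* ring index helpers: advance to the next gd/sd slot, wrapping back to 0
 * after the last entry */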
667 static u32 get_next_gd(u32 current)
669 if (current != PPC4XX_LAST_GD)
675 static u32 get_next_sd(u32 current)
677 if (current != PPC4XX_LAST_SD)
683 int crypto4xx_build_pd(struct crypto_async_request *req,
684 struct crypto4xx_ctx *ctx,
685 struct scatterlist *src,
686 struct scatterlist *dst,
687 const unsigned int datalen,
688 const __le32 *iv, const u32 iv_len,
689 const struct dynamic_sa_ctl *req_sa,
690 const unsigned int sa_len,
691 const unsigned int assoclen,
692 struct scatterlist *_dst)
694 struct crypto4xx_device *dev = ctx->dev;
695 struct dynamic_sa_ctl *sa;
699 u32 fst_gd = 0xffffffff;
700 u32 fst_sd = 0xffffffff;
703 struct pd_uinfo *pd_uinfo;
704 unsigned int nbytes = datalen;
705 size_t offset_to_sr_ptr;
708 bool is_busy, force_sd;
* There's a very subtle/disguised "bug" in the hardware that
* gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
* of the hardware spec:
* *drum roll* the AES/(T)DES OFB and CFB modes are listed as
* operation modes for >>> "Block ciphers" <<<.
* To work around this issue and stop the hardware from causing
* "overran dst buffer" errors on ciphertexts that are not a multiple
* of 16 (AES_BLOCK_SIZE), we force the driver to use the
722 force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
723 || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
724 && (datalen % AES_BLOCK_SIZE);
/* figure out how many gather descriptors are needed */
727 tmp = sg_nents_for_len(src, assoclen + datalen);
729 dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
738 dst = scatterwalk_ffwd(_dst, dst, assoclen);
/* figure out how many scatter descriptors are needed */
if (sg_is_last(dst) && !force_sd) {
745 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
746 num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
747 if (datalen % PPC4XX_SD_BUFFER_SIZE)
* The following section of code needs to be protected.
* The gather ring and scatter ring entries need to be consecutive.
* If we run out of any kind of descriptor, the descriptors
* already obtained must be returned to their original place.
760 spin_lock_irqsave(&dev->core_dev->lock, flags);
* Let the caller know to slow down, once more than 13/16ths = 81%
* of the available data contexts are being used simultaneously.
* With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
* 31 more contexts before new requests have to be rejected.
768 if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
769 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
770 ((PPC4XX_NUM_PD * 13) / 16);
* To fix contention issues between ipsec (no backlog) and
* dm-crypt (backlog), reserve 32 entries for "no backlog"
777 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
778 ((PPC4XX_NUM_PD * 15) / 16);
781 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
787 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
788 if (fst_gd == ERING_WAS_FULL) {
789 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
794 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
795 if (fst_sd == ERING_WAS_FULL) {
797 dev->gdr_head = fst_gd;
798 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
802 pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
803 if (pd_entry == ERING_WAS_FULL) {
805 dev->gdr_head = fst_gd;
807 dev->sdr_head = fst_sd;
808 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
811 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
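/* all needed descriptors are reserved; the entries can now be filled in
 * without holding the lock */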
813 pd = &dev->pdr[pd_entry];
816 pd_uinfo = &dev->pdr_uinfo[pd_entry];
817 pd_uinfo->num_gd = num_gd;
818 pd_uinfo->num_sd = num_sd;
819 pd_uinfo->dest_va = dst;
820 pd_uinfo->async_req = req;
823 memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
825 sa = pd_uinfo->sa_va;
826 memcpy(sa, req_sa, sa_len * 4);
828 sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
829 offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
830 *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
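/* patch the state record pointer inside the shadow SA so the engine can
 * find the per-request state (saved IV/digest) for this descriptor */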
834 struct scatterlist *sg;
836 /* get first gd we are going to use */
838 pd_uinfo->first_gd = fst_gd;
839 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
842 sa->sa_command_0.bf.gather = 1;
/* walk the sg list and set up the gather array */
849 len = min(sg->length, nbytes);
850 gd->ptr = dma_map_page(dev->core_dev->device,
851 sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
852 gd->ctl_len.len = len;
853 gd->ctl_len.done = 0;
854 gd->ctl_len.ready = 1;
858 nbytes -= sg->length;
859 gd_idx = get_next_gd(gd_idx);
860 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
864 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
865 src->offset, min(nbytes, src->length),
868 * Disable gather in sa command
870 sa->sa_command_0.bf.gather = 0;
872 * Indicate gather array is not used
874 pd_uinfo->first_gd = 0xffffffff;
* we know the application gave us a dst that is a single contiguous
* piece of memory, so there is no need to use the scatter ring.
881 pd_uinfo->first_sd = 0xffffffff;
882 sa->sa_command_0.bf.scatter = 0;
883 pd->dest = (u32)dma_map_page(dev->core_dev->device,
884 sg_page(dst), dst->offset,
885 min(datalen, dst->length),
889 struct ce_sd *sd = NULL;
893 sa->sa_command_0.bf.scatter = 1;
894 pd_uinfo->first_sd = fst_sd;
895 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
897 /* setup scatter descriptor */
/* sd->ptr was already set up in crypto4xx_build_sdr() */
901 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
902 nbytes -= PPC4XX_SD_BUFFER_SIZE;
906 sd_idx = get_next_sd(sd_idx);
907 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
908 /* setup scatter descriptor */
911 if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
912 nbytes -= PPC4XX_SD_BUFFER_SIZE;
* one SD entry can hold PPC4XX_SD_BUFFER_SIZE bytes, which is
* more than the remaining nbytes, so we are done.
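/* PD_CTL_HASH_FINAL is set for AHASH and AEAD requests; note that '|'
 * binds tighter than '?:' below, so the expression reads
 * (is_ahash | is_aead) ? PD_CTL_HASH_FINAL : 0 */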
923 pd->pd_ctl.w = PD_CTL_HOST_READY |
924 ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
925 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
926 PD_CTL_HASH_FINAL : 0);
927 pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
928 pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
/* write any value to push the engine to read a pd */
932 writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
933 writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
934 return is_busy ? -EBUSY : -EINPROGRESS;
938 * Algorithm Registration Functions
940 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
941 struct crypto4xx_ctx *ctx)
943 ctx->dev = amcc_alg->dev;
949 static int crypto4xx_sk_init(struct crypto_skcipher *sk)
951 struct skcipher_alg *alg = crypto_skcipher_alg(sk);
952 struct crypto4xx_alg *amcc_alg;
953 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
955 if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
956 ctx->sw_cipher.cipher =
957 crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
958 CRYPTO_ALG_NEED_FALLBACK);
959 if (IS_ERR(ctx->sw_cipher.cipher))
960 return PTR_ERR(ctx->sw_cipher.cipher);
963 amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
964 crypto4xx_ctx_init(amcc_alg, ctx);
968 static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
970 crypto4xx_free_sa(ctx);
973 static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
975 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
977 crypto4xx_common_exit(ctx);
978 if (ctx->sw_cipher.cipher)
979 crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
982 static int crypto4xx_aead_init(struct crypto_aead *tfm)
984 struct aead_alg *alg = crypto_aead_alg(tfm);
985 struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
986 struct crypto4xx_alg *amcc_alg;
988 ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
989 CRYPTO_ALG_NEED_FALLBACK |
991 if (IS_ERR(ctx->sw_cipher.aead))
992 return PTR_ERR(ctx->sw_cipher.aead);
994 amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
995 crypto4xx_ctx_init(amcc_alg, ctx);
996 crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
997 crypto_aead_reqsize(ctx->sw_cipher.aead),
998 sizeof(struct crypto4xx_aead_reqctx)));
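/* the request context must be large enough for either our own reqctx or a
 * full aead_request (plus some slack) for the software fallback */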
1002 static void crypto4xx_aead_exit(struct crypto_aead *tfm)
1004 struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
1006 crypto4xx_common_exit(ctx);
1007 crypto_free_aead(ctx->sw_cipher.aead);
1010 static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1011 struct crypto4xx_alg_common *crypto_alg,
1014 struct crypto4xx_alg *alg;
1018 for (i = 0; i < array_size; i++) {
1019 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1023 alg->alg = crypto_alg[i];
1026 switch (alg->alg.type) {
1027 case CRYPTO_ALG_TYPE_AEAD:
1028 rc = crypto_register_aead(&alg->alg.u.aead);
1031 case CRYPTO_ALG_TYPE_AHASH:
1032 rc = crypto_register_ahash(&alg->alg.u.hash);
1035 case CRYPTO_ALG_TYPE_RNG:
1036 rc = crypto_register_rng(&alg->alg.u.rng);
1040 rc = crypto_register_skcipher(&alg->alg.u.cipher);
1047 list_add_tail(&alg->entry, &sec_dev->alg_list);
1053 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1055 struct crypto4xx_alg *alg, *tmp;
1057 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1058 list_del(&alg->entry);
1059 switch (alg->alg.type) {
1060 case CRYPTO_ALG_TYPE_AHASH:
1061 crypto_unregister_ahash(&alg->alg.u.hash);
1064 case CRYPTO_ALG_TYPE_AEAD:
1065 crypto_unregister_aead(&alg->alg.u.aead);
1068 case CRYPTO_ALG_TYPE_RNG:
1069 crypto_unregister_rng(&alg->alg.u.rng);
1073 crypto_unregister_skcipher(&alg->alg.u.cipher);
1079 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1081 struct device *dev = (struct device *)data;
1082 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1083 struct pd_uinfo *pd_uinfo;
1085 u32 tail = core_dev->dev->pdr_tail;
1086 u32 head = core_dev->dev->pdr_head;
1089 pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
1090 pd = &core_dev->dev->pdr[tail];
1091 if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
1092 ((READ_ONCE(pd->pd_ctl.w) &
1093 (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
1095 crypto4xx_pd_done(core_dev->dev, tail);
1096 tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
/* if the tail entry is not done yet, stop walking */
1101 } while (head != tail);
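/*
 * Top-half interrupt handler: acknowledge the interrupt by writing the
 * given clear mask and defer the descriptor ring walk to the tasklet.
 */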
1107 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
1110 struct device *dev = (struct device *)data;
1111 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1113 writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1114 tasklet_schedule(&core_dev->tasklet);
1119 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1121 return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
1124 static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
1126 return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
1127 PPC4XX_TMO_ERR_INT);
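/*
 * ppc4xx_prng_data_read - read up to @max bytes of pseudo-random data from
 * the engine's PRNG, two 32-bit result registers (8 bytes) per round,
 * polling the busy flag in between.
 */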
1130 static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
1131 u8 *data, unsigned int max)
1133 unsigned int i, curr = 0;
1137 /* trigger PRN generation */
1138 writel(PPC4XX_PRNG_CTRL_AUTO_EN,
1139 dev->ce_base + CRYPTO4XX_PRNG_CTRL);
1141 for (i = 0; i < 1024; i++) {
1142 /* usually 19 iterations are enough */
1143 if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
1144 CRYPTO4XX_PRNG_STAT_BUSY))
1147 val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
1148 val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
1154 if ((max - curr) >= 8) {
1155 memcpy(data, &val, 8);
1159 /* copy only remaining bytes */
1160 memcpy(data, &val, max - curr);
1163 } while (curr < max);
1168 static int crypto4xx_prng_generate(struct crypto_rng *tfm,
1169 const u8 *src, unsigned int slen,
1170 u8 *dstn, unsigned int dlen)
1172 struct rng_alg *alg = crypto_rng_alg(tfm);
1173 struct crypto4xx_alg *amcc_alg;
1174 struct crypto4xx_device *dev;
1177 amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
1178 dev = amcc_alg->dev;
1180 mutex_lock(&dev->core_dev->rng_lock);
1181 ret = ppc4xx_prng_data_read(dev, dstn, dlen);
1182 mutex_unlock(&dev->core_dev->rng_lock);
1187 static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
1194 * Supported Crypto Algorithms
1196 static struct crypto4xx_alg_common crypto4xx_alg[] = {
1197 /* Crypto AES modes */
1198 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1200 .cra_name = "cbc(aes)",
1201 .cra_driver_name = "cbc-aes-ppc4xx",
1202 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1203 .cra_flags = CRYPTO_ALG_ASYNC |
1204 CRYPTO_ALG_KERN_DRIVER_ONLY,
1205 .cra_blocksize = AES_BLOCK_SIZE,
1206 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1207 .cra_module = THIS_MODULE,
1209 .min_keysize = AES_MIN_KEY_SIZE,
1210 .max_keysize = AES_MAX_KEY_SIZE,
1211 .ivsize = AES_IV_SIZE,
1212 .setkey = crypto4xx_setkey_aes_cbc,
1213 .encrypt = crypto4xx_encrypt_iv,
1214 .decrypt = crypto4xx_decrypt_iv,
1215 .init = crypto4xx_sk_init,
1216 .exit = crypto4xx_sk_exit,
1218 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1220 .cra_name = "cfb(aes)",
1221 .cra_driver_name = "cfb-aes-ppc4xx",
1222 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1223 .cra_flags = CRYPTO_ALG_ASYNC |
1224 CRYPTO_ALG_KERN_DRIVER_ONLY,
1225 .cra_blocksize = AES_BLOCK_SIZE,
1226 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1227 .cra_module = THIS_MODULE,
1229 .min_keysize = AES_MIN_KEY_SIZE,
1230 .max_keysize = AES_MAX_KEY_SIZE,
1231 .ivsize = AES_IV_SIZE,
1232 .setkey = crypto4xx_setkey_aes_cfb,
1233 .encrypt = crypto4xx_encrypt_iv,
1234 .decrypt = crypto4xx_decrypt_iv,
1235 .init = crypto4xx_sk_init,
1236 .exit = crypto4xx_sk_exit,
1238 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1240 .cra_name = "ctr(aes)",
1241 .cra_driver_name = "ctr-aes-ppc4xx",
1242 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1243 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1245 CRYPTO_ALG_KERN_DRIVER_ONLY,
1246 .cra_blocksize = AES_BLOCK_SIZE,
1247 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1248 .cra_module = THIS_MODULE,
1250 .min_keysize = AES_MIN_KEY_SIZE,
1251 .max_keysize = AES_MAX_KEY_SIZE,
1252 .ivsize = AES_IV_SIZE,
1253 .setkey = crypto4xx_setkey_aes_ctr,
1254 .encrypt = crypto4xx_encrypt_ctr,
1255 .decrypt = crypto4xx_decrypt_ctr,
1256 .init = crypto4xx_sk_init,
1257 .exit = crypto4xx_sk_exit,
1259 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1261 .cra_name = "rfc3686(ctr(aes))",
1262 .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
1263 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1264 .cra_flags = CRYPTO_ALG_ASYNC |
1265 CRYPTO_ALG_KERN_DRIVER_ONLY,
1266 .cra_blocksize = AES_BLOCK_SIZE,
1267 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1268 .cra_module = THIS_MODULE,
1270 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1271 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1272 .ivsize = CTR_RFC3686_IV_SIZE,
1273 .setkey = crypto4xx_setkey_rfc3686,
1274 .encrypt = crypto4xx_rfc3686_encrypt,
1275 .decrypt = crypto4xx_rfc3686_decrypt,
1276 .init = crypto4xx_sk_init,
1277 .exit = crypto4xx_sk_exit,
1279 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1281 .cra_name = "ecb(aes)",
1282 .cra_driver_name = "ecb-aes-ppc4xx",
1283 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1284 .cra_flags = CRYPTO_ALG_ASYNC |
1285 CRYPTO_ALG_KERN_DRIVER_ONLY,
1286 .cra_blocksize = AES_BLOCK_SIZE,
1287 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1288 .cra_module = THIS_MODULE,
1290 .min_keysize = AES_MIN_KEY_SIZE,
1291 .max_keysize = AES_MAX_KEY_SIZE,
1292 .setkey = crypto4xx_setkey_aes_ecb,
1293 .encrypt = crypto4xx_encrypt_noiv,
1294 .decrypt = crypto4xx_decrypt_noiv,
1295 .init = crypto4xx_sk_init,
1296 .exit = crypto4xx_sk_exit,
1298 { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1300 .cra_name = "ofb(aes)",
1301 .cra_driver_name = "ofb-aes-ppc4xx",
1302 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1303 .cra_flags = CRYPTO_ALG_ASYNC |
1304 CRYPTO_ALG_KERN_DRIVER_ONLY,
1305 .cra_blocksize = AES_BLOCK_SIZE,
1306 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1307 .cra_module = THIS_MODULE,
1309 .min_keysize = AES_MIN_KEY_SIZE,
1310 .max_keysize = AES_MAX_KEY_SIZE,
1311 .ivsize = AES_IV_SIZE,
1312 .setkey = crypto4xx_setkey_aes_ofb,
1313 .encrypt = crypto4xx_encrypt_iv,
1314 .decrypt = crypto4xx_decrypt_iv,
1315 .init = crypto4xx_sk_init,
1316 .exit = crypto4xx_sk_exit,
1320 { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1321 .setkey = crypto4xx_setkey_aes_ccm,
1322 .setauthsize = crypto4xx_setauthsize_aead,
1323 .encrypt = crypto4xx_encrypt_aes_ccm,
1324 .decrypt = crypto4xx_decrypt_aes_ccm,
1325 .init = crypto4xx_aead_init,
1326 .exit = crypto4xx_aead_exit,
1327 .ivsize = AES_BLOCK_SIZE,
1330 .cra_name = "ccm(aes)",
1331 .cra_driver_name = "ccm-aes-ppc4xx",
1332 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1333 .cra_flags = CRYPTO_ALG_ASYNC |
1334 CRYPTO_ALG_NEED_FALLBACK |
1335 CRYPTO_ALG_KERN_DRIVER_ONLY,
1337 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1338 .cra_module = THIS_MODULE,
1341 { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1342 .setkey = crypto4xx_setkey_aes_gcm,
1343 .setauthsize = crypto4xx_setauthsize_aead,
1344 .encrypt = crypto4xx_encrypt_aes_gcm,
1345 .decrypt = crypto4xx_decrypt_aes_gcm,
1346 .init = crypto4xx_aead_init,
1347 .exit = crypto4xx_aead_exit,
1348 .ivsize = GCM_AES_IV_SIZE,
1351 .cra_name = "gcm(aes)",
1352 .cra_driver_name = "gcm-aes-ppc4xx",
1353 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1354 .cra_flags = CRYPTO_ALG_ASYNC |
1355 CRYPTO_ALG_NEED_FALLBACK |
1356 CRYPTO_ALG_KERN_DRIVER_ONLY,
1358 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1359 .cra_module = THIS_MODULE,
1362 { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
1364 .cra_name = "stdrng",
1365 .cra_driver_name = "crypto4xx_rng",
1366 .cra_priority = 300,
1368 .cra_module = THIS_MODULE,
1370 .generate = crypto4xx_prng_generate,
1371 .seed = crypto4xx_prng_seed,
1377 * Module Initialization Routine
1379 static int crypto4xx_probe(struct platform_device *ofdev)
1382 struct resource res;
1383 struct device *dev = &ofdev->dev;
1384 struct crypto4xx_core_device *core_dev;
1386 bool is_revb = true;
1388 rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1392 if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1393 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1394 mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1395 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1396 mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1397 } else if (of_find_compatible_node(NULL, NULL,
1398 "amcc,ppc405ex-crypto")) {
1399 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1400 mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1401 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1402 mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1404 } else if (of_find_compatible_node(NULL, NULL,
1405 "amcc,ppc460sx-crypto")) {
1406 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1407 mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1408 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1409 mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1411 printk(KERN_ERR "Crypto Function Not supported!\n");
1415 core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1419 dev_set_drvdata(dev, core_dev);
1420 core_dev->ofdev = ofdev;
1421 core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
* Older versions of the 460EX/GT have a hardware bug and hence
* do not support H/W-based security interrupt coalescing
1430 pvr = mfspr(SPRN_PVR);
1431 if (is_revb && ((pvr >> 4) == 0x130218A)) {
1432 u32 min = PVR_MIN(pvr);
1435 dev_info(dev, "RevA detected - disable interrupt coalescing\n");
1440 core_dev->dev->core_dev = core_dev;
1441 core_dev->dev->is_revb = is_revb;
1442 core_dev->device = dev;
1443 mutex_init(&core_dev->rng_lock);
1444 spin_lock_init(&core_dev->lock);
1445 INIT_LIST_HEAD(&core_dev->dev->alg_list);
1446 ratelimit_default_init(&core_dev->dev->aead_ratelimit);
1447 rc = crypto4xx_build_pdr(core_dev->dev);
1451 rc = crypto4xx_build_gdr(core_dev->dev);
1455 rc = crypto4xx_build_sdr(core_dev->dev);
1459 /* Init tasklet for bottom half processing */
1460 tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1461 (unsigned long) dev);
1463 core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1464 if (!core_dev->dev->ce_base) {
1465 dev_err(dev, "failed to of_iomap\n");
1470 /* Register for Crypto isr, Crypto Engine IRQ */
1471 core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1472 rc = request_irq(core_dev->irq, is_revb ?
1473 crypto4xx_ce_interrupt_handler_revb :
1474 crypto4xx_ce_interrupt_handler, 0,
1475 KBUILD_MODNAME, dev);
1477 goto err_request_irq;
1479 /* need to setup pdr, rdr, gdr and sdr before this */
1480 crypto4xx_hw_init(core_dev->dev);
1482 /* Register security algorithms with Linux CryptoAPI */
1483 rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1484 ARRAY_SIZE(crypto4xx_alg));
1488 ppc4xx_trng_probe(core_dev);
1492 free_irq(core_dev->irq, dev);
1494 irq_dispose_mapping(core_dev->irq);
1495 iounmap(core_dev->dev->ce_base);
1497 tasklet_kill(&core_dev->tasklet);
1499 crypto4xx_destroy_sdr(core_dev->dev);
1500 crypto4xx_destroy_gdr(core_dev->dev);
1502 crypto4xx_destroy_pdr(core_dev->dev);
1503 kfree(core_dev->dev);
1510 static int crypto4xx_remove(struct platform_device *ofdev)
1512 struct device *dev = &ofdev->dev;
1513 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1515 ppc4xx_trng_remove(core_dev);
1517 free_irq(core_dev->irq, dev);
1518 irq_dispose_mapping(core_dev->irq);
1520 tasklet_kill(&core_dev->tasklet);
1521 /* Un-register with Linux CryptoAPI */
1522 crypto4xx_unregister_alg(core_dev->dev);
1523 mutex_destroy(&core_dev->rng_lock);
1524 /* Free all allocated memory */
1525 crypto4xx_stop_all(core_dev);
1530 static const struct of_device_id crypto4xx_match[] = {
1531 { .compatible = "amcc,ppc4xx-crypto",},
1534 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1536 static struct platform_driver crypto4xx_driver = {
1538 .name = KBUILD_MODNAME,
1539 .of_match_table = crypto4xx_match,
1541 .probe = crypto4xx_probe,
1542 .remove = crypto4xx_remove,
1545 module_platform_driver(crypto4xx_driver);
1547 MODULE_LICENSE("GPL");
1548 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1549 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");