/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

#define DCP_ALIGNMENT	64
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};
struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan			chan;
	uint32_t			fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	int				hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	int	enc:1;
	int	ecb:1;
};

struct dcp_sha_req_ctx {
	int	init:1;
	int	fini:1;
};
/*
 * There can be only one instance of the MXS DCP due to the design of
 * the Linux Crypto API.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
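/*
 * To start a transfer on a channel, hand the channel the physical address
 * of its DMA descriptor via CMDPTR and then increment the channel
 * semaphore; the DCP fetches and executes the descriptor and raises an
 * interrupt, which signals the matching per-channel completion below.
 */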
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	int ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	if (!wait_for_completion_timeout(&sdcp->completion[chan],
					 msecs_to_jiffies(1000))) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		ret = -ETIMEDOUT;
		goto out;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	/* Unmap the descriptor on all paths, including errors. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return ret;
}
/*
 * Encryption (AES128)
 */
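/*
 * The cipher payload is the aes_key bounce buffer: the 128-bit key sits
 * in the first half and, for CBC runs flagged with CIPHER_INIT, the IV
 * sits in the second half, which is why the buffer is sized
 * 2 * AES_KEYSIZE_128.
 */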
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
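/*
 * The DCP needs contiguous DMA-able memory, so the request scatterlists
 * are bounced through the coherent aes_in_buf/aes_out_buf helpers in
 * DCP_BUF_SZ chunks: fill the input buffer, run the AES operation, then
 * scatter the output buffer back into the destination scatterlist.
 */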
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}

					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}
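/*
 * Each DCP channel is serviced by its own kthread: requests are taken off
 * the channel's crypto_queue and processed synchronously. The thread
 * sleeps whenever the queue is empty and is woken by the enqueue path.
 */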
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
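/*
 * The DCP only implements AES-128. Requests keyed with 192- or 256-bit
 * keys are handed to the software skcipher fallback allocated at tfm
 * init time.
 */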
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
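/*
 * Queue a request on the AES channel and wake its handler thread. The
 * call returns asynchronously; the outcome is reported through the
 * request's completion callback.
 */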
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}
/*
 * Hashing (SHA1/SHA256)
 */
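/*
 * The hash is computed in a streaming fashion: the first descriptor of a
 * session carries HASH_INIT, intermediate DCP_BUF_SZ-sized blocks carry
 * neither flag, and the final block carries HASH_TERM with the digest
 * written back to the descriptor payload address.
 */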
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, req->result,
					     halg->digestsize, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
				 DMA_FROM_DEVICE);

	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
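/*
 * Accumulate request data in the coherent sha_in_buf bounce buffer,
 * submitting full DCP_BUF_SZ blocks to the engine as they fill up and
 * the trailing partial block once the request is final.
 */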
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason, the result is flipped. */
		for (i = 0; i < halg->digestsize / 2; i++) {
			swap(req->result[i],
			     req->result[halg->digestsize - i - 1]);
		}
	}

	return 0;
}
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
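/*
 * All of update/final/finup funnel through this helper; @fini marks the
 * trailing request of the stream so the digest gets terminated and read
 * back.
 */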
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		rctx->init = 1;
		actx->hot = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}
static int dcp_sha_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int dcp_sha_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_noimport,
	.export	= dcp_sha_noexport,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_noimport,
	.export	= dcp_sha_noexport,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
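/*
 * The DCP status register carries one IRQ bit per channel; each set bit
 * completes the corresponding channel's completion so the waiting
 * kick-off path in mxs_dcp_start_dma() can proceed.
 */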
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
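/*
 * Bring-up: reset the block, enable all DMA channels, poison the context
 * pointer (context switching is unused), then spawn the per-channel
 * threads and register whatever algorithms the CAPABILITY1 register
 * advertises.
 */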
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
		return dcp_vmi_irq;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
		return dcp_irq;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting Crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");