// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

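/*
 * Layout of one small-packet buffer (pbuf) package, one per queue slot:
 * <data pbuf | IV | in/out MAC>. Whole packages are packed into pages so
 * that none straddles a page boundary; the slots that do not fill a
 * complete page are allocated on top as SEC_PBUF_LEFT_SZ.
 */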
#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

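/*
 * Request ids come from a per-queue IDR; the id doubles as the index
 * into qp_ctx->req_list and as the BD tag that the completion handler
 * uses to find the request again.
 */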
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(req->ctx->dev, "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err_ratelimited(ctx->dev,
				    "err_type[%d],done[%d],flag[%d]\n",
				    req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

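/*
 * Send one BD to the hardware queue. When the number of in-flight
 * requests reaches fake_req_limit, requests with
 * CRYPTO_TFM_REQ_MAY_BACKLOG set are parked on qp_ctx->backlog and
 * finished with -EINPROGRESS later, while all others get -EBUSY,
 * following the usual crypto API contract.
 */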
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, pbuffer is used for
 * small packets (< 512 bytes) to avoid IOMMU translation overhead.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every PAGE contains SEC_PBUF_NUM of SEC_PBUF_PKG;
	 * the sec_qp_ctx needs QM_Q_DEPTH of SEC_PBUF_PKG in total,
	 * so SEC_PBUF_PAGE_NUM pages are needed
	 * for the SEC_TOTAL_PBUF_SZ
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

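/*
 * Generate one thin setkey handler per cipher/mode pair, e.g.
 * GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) expands to
 * sec_setkey_aes_cbc(), which simply forwards to sec_skcipher_setkey().
 */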
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

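/*
 * pbuf path: rather than mapping the caller's scatterlist for DMA, the
 * whole request (associated data plus payload for AEAD) is bounced
 * through the preallocated per-slot pbuf that the hardware addresses
 * directly.
 */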
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					     SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

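/*
 * authenc() keys arrive as one blob which crypto_authenc_extractkeys()
 * splits into the cipher key and the authentication key; auth keys
 * longer than the hash block size are pre-digested, as HMAC requires.
 */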
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

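/*
 * CBC chaining: the IV for the next request is the last ciphertext
 * block, read from dst on encrypt and from src on decrypt, and copied
 * back into the request's IV buffer.
 */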
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(req->ctx->dev, "copy output iv error!\n");
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* IV output at encrypt of CBC mode */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

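/*
 * mac_key_alg packs the MAC length and the auth key length in units of
 * SEC_SQE_LEN_RATE (4-byte words), together with the auth algorithm
 * selector.
 */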
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output IV before decrypt, as CBC decryption consumes the src data */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV from the saved copy */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

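/*
 * Per-algorithm-type hook tables: sec_process() drives a request through
 * buf_map -> do_transfer -> bd_fill -> bd_send, and the queue callback
 * completes it via buf_unmap -> callback.
 */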
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		/* ctx->dev is not set up yet, so report via pr_err */
		pr_err("hisi_sec2: get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

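/*
 * Hardware input length rules: XTS needs at least one AES block of
 * payload, while ECB/CBC need a whole number of blocks.
 */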
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

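/*
 * For AEAD decryption the tail of ->cryptlen is the MAC, so the cipher
 * length handed to the hardware excludes authsize.
 */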
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}