// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
/* SEC sqe(bd) bit field macros */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)
#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)
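
/*
 * Worked example of the pbuf sizing above, assuming 4 KiB pages and the
 * SEC_IV_SIZE of 24 bytes and QM_Q_DEPTH of 1024 defined alongside this
 * driver: SEC_PBUF_PKG = 512 + 24 + 2 * 64 = 664 bytes, so
 * SEC_PBUF_NUM = 4096 / 664 = 6 packages fit in one page,
 * SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages are needed, and the
 * remaining 1024 - 170 * 6 = 4 packages are covered by SEC_PBUF_LEFT_SZ.
 */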
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8

#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		6
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}
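
/*
 * For example, with hlf_q_num = 4 the encrypt requests rotate over
 * queues 0-3 while decrypt requests rotate over queues 4-7, so the
 * two directions never compete for the same queue.
 */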
static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}
static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}
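
/*
 * The hardware packs the completion status into the BD's done_flag
 * word: bit 0 is the done bit (SEC_DONE_MASK), bits 1-3 carry the ICV
 * verdict (SEC_ICV_MASK) and bits 7-10 carry the completion flag
 * (SEC_FLAG_MASK, shifted down by SEC_FLAG_OFFSET).
 */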
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}
static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}
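
/*
 * Queue one BD to the hardware. The return convention mirrors the
 * crypto API: -EBUSY either rejects the request outright (queue above
 * the fake request limit without CRYPTO_TFM_REQ_MAY_BACKLOG) or means
 * the request was parked on the backlog list; -ENOBUFS reports a full
 * hardware queue; -EINPROGRESS means the BD was sent successfully.
 */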
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}
static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->a_ivin, res->a_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}
/*
 * To improve performance, pbuffer is used for
 * small packets (< 512 bytes) to avoid the cost of IOMMU translation.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every PAGE contains six SEC_PBUF_PKGs.
	 * The sec_qp_ctx needs QM_Q_DEPTH SEC_PBUF_PKGs,
	 * so SEC_PBUF_PAGE_NUM pages are needed
	 * for the SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}
static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}
static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}
static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake-request limit of a queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}
static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}
static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}
static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}
static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}
static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}
static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
		return 0;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}
static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		}

		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}

	return 0;
}
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}
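
/*
 * As an illustration, GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES,
 * SEC_CMODE_ECB) below expands to:
 *
 *	static int sec_setkey_aes_ecb(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen, SEC_CALG_AES,
 *					   SEC_CMODE_ECB);
 *	}
 */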
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	size_t authsize;
	u8 *mac_offset;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);
	}

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}
static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
}
static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		ret = sec_cipher_pbuf_map(ctx, req, src);

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);
		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, req->in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}
static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		dev_err(dev, "MAC or AUTH key length error!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}
static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (req->in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}
static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (req->in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64((unsigned long)(uintptr_t)req);

	return 0;
}
/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}
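
/*
 * Example of the ripple carry above: for a counter ending in
 * ... 0x01 0xFF and nums = 1, the last byte wraps to 0x00 and the
 * carry increments the next byte, leaving ... 0x02 0x00.
 */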
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
			sz += 1;
		ctr_iv_inc(iv, iv_size, sz);
	}
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Output the IV after encryption in CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
					      -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}
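
/*
 * Build the CCM auth IV, i.e. the B0 block of RFC 3610: the flags
 * octet carries L' in bits 0-2, M' = (authsize - 2) / 2 in bits 3-5
 * and the Adata bit in bit 6, while the trailing octets are filled
 * with the plaintext length.
 */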
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;
	u8 flag = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_dimension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the low 3 bits are L' */
	flag |= c_req->c_ivin[0] & IV_CL_MASK;

	/* M' occupies bits 3~5, the Adata flag is bit 6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flag |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flag |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flag;

	/*
	 * The last 32 bits hold the counter's initial value, but the
	 * nonce occupies the first 16 of them, so only the tail 16 bits
	 * are filled with the cipher length.
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
		 * the counter must be set to 0x01
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);
	}

	/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
}
static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}
static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}

	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}
static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}
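
/*
 * Generic request pipeline: pick a queue and request id, map the
 * buffers and fill the BD, then hand the BD to the hardware. The IV
 * bookkeeping for CBC/CTR is done around the send so that a failed
 * send can restore the caller's IV.
 */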
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output the IV before decryption in CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's original IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}
static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}
static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	a_ctx->fallback = false;

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}
static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CFB:
	case SEC_CMODE_OFB:
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}
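
/*
 * Software fallback path for keys the engine cannot handle, e.g. the
 * 192-bit XTS keys flagged in sec_skcipher_aes_sm4_setkey().
 */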
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);

	if (!c_ctx->fbtfm) {
		dev_err(dev, "failed to check fallback tfm\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* the software fallback must run in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_ALLOCATES_MEMORY |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
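
/*
 * For instance, the "cbc(aes)" entry below expands to a skcipher_alg
 * whose .cra_driver_name is "hisi_sec_cbc(aes)", accepting AES key
 * sizes of 16 to 32 bytes with a 16-byte block and IV, bound to
 * sec_skcipher_ctx_init()/sec_skcipher_ctx_exit().
 */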
static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
static struct skcipher_alg sec_skciphers_v3[] = {
	SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
};
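
/*
 * CCM stores the payload length in L length octets, where
 * L = iv[0] + 1 and 2 <= L <= 8 (RFC 3610); for small L the cryptlen
 * is additionally checked to fit into BYTE_BITS * L bits.
 */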
static int aead_iv_dimension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}
static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
		authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		ret = aead_iv_dimension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			dev_err(dev, "Kunpeng920 not support 0 length!\n");
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Support AES or SM4 */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct aead_request *subreq = aead_request_ctx(aead_req);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;

	/* Kunpeng920 AEAD mode does not support zero-length input */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	return encrypt ? crypto_aead_encrypt(subreq) :
		crypto_aead_decrypt(subreq);
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}
#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
		     ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_ALLOCATES_MEMORY |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}
static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),

	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),

	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
};
static struct aead_alg sec_aeads_v3[] = {
	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),

	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
};
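
/*
 * The v2 tables are registered on all hardware; the v3-only tables
 * (AES/SM4 CTR, OFB and CFB skciphers plus the SM4 CCM/GCM AEADs) are
 * added only when the QM reports a hardware version above V2, and
 * everything is unwound in reverse order on failure.
 */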
int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	if (qm->ver > QM_HW_V2) {
		ret = crypto_register_skciphers(sec_skciphers_v3,
						ARRAY_SIZE(sec_skciphers_v3));
		if (ret)
			goto reg_skcipher_fail;
	}

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		goto reg_aead_fail;
	if (qm->ver > QM_HW_V2) {
		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
		if (ret)
			goto reg_aead_v3_fail;
	}
	return ret;

reg_aead_v3_fail:
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
reg_aead_fail:
	if (qm->ver > QM_HW_V2)
		crypto_unregister_skciphers(sec_skciphers_v3,
					    ARRAY_SIZE(sec_skciphers_v3));
reg_skcipher_fail:
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	if (qm->ver > QM_HW_V2)
		crypto_unregister_aeads(sec_aeads_v3,
					ARRAY_SIZE(sec_aeads_v3));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));

	if (qm->ver > QM_HW_V2)
		crypto_unregister_skciphers(sec_skciphers_v3,
					    ARRAY_SIZE(sec_skciphers_v3));
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
}