crypto: ccree - don't mangle the request assoclen
author     Gilad Ben-Yossef <gilad@benyossef.com>
           Thu, 18 Apr 2019 13:38:59 +0000 (16:38 +0300)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Thu, 25 Apr 2019 07:38:15 +0000 (15:38 +0800)
We were mangling the caller's request struct: the rfc4309(ccm) and
rfc4106/rfc4543(gcm) paths modified req->assoclen in place (subtracting
the IV size), so the request no longer matched what the caller submitted.

Fix this by keeping an internal copy of assoclen in the aead request
context and working on that instead, leaving req->assoclen untouched.
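
A minimal, standalone sketch of the pattern the patch applies (not driver
code: the struct definitions are trimmed stand-ins, the CCM_BLOCK_IV_SIZE
value here is only illustrative, and cc_proc_rfc4309_ccm_sketch() is a
hypothetical helper that condenses the entry-point copy and the rfc4309
adjustment into one place). The idea is that the entry points snapshot
req->assoclen into the per-request context and all later adjustments and
reads use that copy:

    /*
     * Standalone sketch of the fix: copy the caller-supplied assoclen into
     * the driver's per-request context and adjust only that copy, so the
     * caller's struct aead_request is never modified behind its back.
     */
    #include <stdio.h>

    #define CCM_BLOCK_IV_SIZE 8		/* illustrative value */

    /* trimmed-down stand-ins for the real structures */
    struct aead_request {
    	unsigned int assoclen;		/* owned by the caller */
    };

    struct aead_req_ctx {
    	unsigned int assoclen;		/* driver-private copy added by this patch */
    };

    /* hypothetical rfc4309 path: snapshot assoclen, then work on the copy */
    static void cc_proc_rfc4309_ccm_sketch(struct aead_request *req,
    					   struct aead_req_ctx *areq_ctx)
    {
    	areq_ctx->assoclen = req->assoclen;		/* done in the entry points */
    	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;	/* was: req->assoclen -= ... */
    }

    int main(void)
    {
    	struct aead_request req = { .assoclen = 24 };
    	struct aead_req_ctx ctx;

    	cc_proc_rfc4309_ccm_sketch(&req, &ctx);

    	/* the caller's view is left intact; only the private copy shrank */
    	printf("req->assoclen = %u, areq_ctx->assoclen = %u\n",
    	       req.assoclen, ctx.assoclen);
    	return 0;
    }

With the illustrative values above this prints "req->assoclen = 24,
areq_ctx->assoclen = 16": the caller's request is left exactly as submitted.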

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/ccree/cc_aead.c
drivers/crypto/ccree/cc_aead.h
drivers/crypto/ccree/cc_buffer_mgr.c

diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 0141c67808108eb3a16f568edf3e779fc0d57cde..a49814d297147ff4f1b09d79fafec561da484ea5 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -764,7 +764,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
-                            areq->assoclen, NS_BIT);
+                            areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
@@ -1113,9 +1113,11 @@ static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
 {
+       struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
+
        /* Hash associated data */
-       if (req->assoclen > 0)
+       if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
 
        /* Hash IV */
@@ -1343,7 +1345,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
-       unsigned int assoclen = req->assoclen;
+       unsigned int assoclen = areq_ctx->assoclen;
        unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
                        (req->cryptlen - ctx->authsize) : req->cryptlen;
 
@@ -1502,7 +1504,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
        idx++;
 
        /* process assoc data */
-       if (req->assoclen > 0) {
+       if (req_ctx->assoclen > 0) {
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
        } else {
                hw_desc_init(&desc[idx]);
@@ -1594,7 +1596,7 @@ static int config_ccm_adata(struct aead_request *req)
         * NIST Special Publication 800-38C
         */
        *b0 |= (8 * ((m - 2) / 2));
-       if (req->assoclen > 0)
+       if (req_ctx->assoclen > 0)
                *b0 |= 64;  /* Enable bit 6 if Adata exists. */
 
        rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
@@ -1605,7 +1607,7 @@ static int config_ccm_adata(struct aead_request *req)
         /* END of "taken from crypto/ccm.c" */
 
        /* l(a) - size of associated data. */
-       req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+       req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
 
        memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
        req->iv[15] = 1;
@@ -1637,7 +1639,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
        memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
               CCM_BLOCK_IV_SIZE);
        req->iv = areq_ctx->ctr_iv;
-       req->assoclen -= CCM_BLOCK_IV_SIZE;
+       areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 
 static void cc_set_ghash_desc(struct aead_request *req,
@@ -1845,7 +1847,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
        // for gcm and rfc4106.
        cc_set_ghash_desc(req, desc, seq_size);
        /* process(ghash) assoc data */
-       if (req->assoclen > 0)
+       if (req_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
        cc_set_gctr_desc(req, desc, seq_size);
        /* process(gctr+ghash) */
@@ -1869,8 +1871,8 @@ static int config_gcm_context(struct aead_request *req)
                                (req->cryptlen - ctx->authsize);
        __be32 counter = cpu_to_be32(2);
 
-       dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
-               __func__, cryptlen, req->assoclen, ctx->authsize);
+       dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+               __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
 
        memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
 
@@ -1886,7 +1888,7 @@ static int config_gcm_context(struct aead_request *req)
        if (!req_ctx->plaintext_authenticate_only) {
                __be64 temp64;
 
-               temp64 = cpu_to_be64(req->assoclen * 8);
+               temp64 = cpu_to_be64(req_ctx->assoclen * 8);
                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
                temp64 = cpu_to_be64(cryptlen * 8);
                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1896,8 +1898,8 @@ static int config_gcm_context(struct aead_request *req)
                 */
                __be64 temp64;
 
-               temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
-                                     cryptlen) * 8);
+               temp64 = cpu_to_be64((req_ctx->assoclen +
+                                     GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
                temp64 = 0;
                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1917,7 +1919,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
               GCM_BLOCK_RFC4_IV_SIZE);
        req->iv = areq_ctx->ctr_iv;
-       req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+       areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 
 static int cc_proc_aead(struct aead_request *req,
@@ -1942,7 +1944,7 @@ static int cc_proc_aead(struct aead_request *req,
        /* Check data length according to mode */
        if (validate_data_size(ctx, direct, req)) {
                dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
-                       req->cryptlen, req->assoclen);
+                       req->cryptlen, areq_ctx->assoclen);
                crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
                return -EINVAL;
        }
@@ -2095,6 +2097,7 @@ static int cc_aead_encrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = false;
 
@@ -2126,6 +2129,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = true;
 
@@ -2147,6 +2151,7 @@ static int cc_aead_decrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = false;
 
@@ -2176,6 +2181,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
 
        areq_ctx->is_gcm4543 = true;
@@ -2295,6 +2301,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
 
        areq_ctx->plaintext_authenticate_only = false;
@@ -2323,6 +2330,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
 
        cc_proc_rfc4_gcm(req);
@@ -2354,6 +2362,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
 
        areq_ctx->plaintext_authenticate_only = false;
@@ -2382,6 +2391,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 
        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
+       areq_ctx->assoclen = req->assoclen;
        areq_ctx->backup_giv = NULL;
 
        cc_proc_rfc4_gcm(req);
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 6cfae15f85e5c183c7b706aa0348bb23a369b924..e51724b96c564b3b2c8df103ae8f87519741a20b 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -67,6 +67,7 @@ struct aead_req_ctx {
        u8 backup_mac[MAX_MAC_SIZE];
        u8 *backup_iv; /*store iv for generated IV flow*/
        u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+       u32 assoclen; /* internal assoclen */
        dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
        /* buffer for internal ccm configurations */
        dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 8554cfb2963ad8615e5932f51426b668727f200f..86e498bee0bb1a19e9d671607f661dc7572c3a6b 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       u32 skip = req->assoclen + req->cryptlen;
+       u32 skip = areq_ctx->assoclen + req->cryptlen;
 
        if (areq_ctx->is_gcm4543)
                skip += crypto_aead_ivsize(tfm);
@@ -575,8 +575,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 
        dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
                sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
-               req->assoclen, req->cryptlen);
-       size_to_unmap = req->assoclen + req->cryptlen;
+               areq_ctx->assoclen, req->cryptlen);
+       size_to_unmap = areq_ctx->assoclen + req->cryptlen;
        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
                size_to_unmap += areq_ctx->req_authsize;
        if (areq_ctx->is_gcm4543)
@@ -663,7 +663,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
        struct scatterlist *current_sg = req->src;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned int sg_index = 0;
-       u32 size_of_assoc = req->assoclen;
+       u32 size_of_assoc = areq_ctx->assoclen;
        struct device *dev = drvdata_to_dev(drvdata);
 
        if (areq_ctx->is_gcm4543)
@@ -674,7 +674,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
                goto chain_assoc_exit;
        }
 
-       if (req->assoclen == 0) {
+       if (areq_ctx->assoclen == 0) {
                areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
                areq_ctx->assoc.nents = 0;
                areq_ctx->assoc.mlli_nents = 0;
@@ -734,7 +734,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
                        cc_dma_buf_type(areq_ctx->assoc_buff_type),
                        areq_ctx->assoc.nents);
                cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
-                               req->assoclen, 0, is_last,
+                               areq_ctx->assoclen, 0, is_last,
                                &areq_ctx->assoc.mlli_nents);
                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
        }
@@ -893,11 +893,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
        u32 src_mapped_nents = 0, dst_mapped_nents = 0;
        u32 offset = 0;
        /* non-inplace mode */
-       unsigned int size_for_map = req->assoclen + req->cryptlen;
+       unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        u32 sg_index = 0;
        bool is_gcm4543 = areq_ctx->is_gcm4543;
-       u32 size_to_skip = req->assoclen;
+       u32 size_to_skip = areq_ctx->assoclen;
 
        if (is_gcm4543)
                size_to_skip += crypto_aead_ivsize(tfm);
@@ -941,7 +941,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
        areq_ctx->src_offset = offset;
 
        if (req->src != req->dst) {
-               size_for_map = req->assoclen + req->cryptlen;
+               size_for_map = areq_ctx->assoclen + req->cryptlen;
                size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                                authsize : 0;
                if (is_gcm4543)
@@ -1107,7 +1107,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
                rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-                                         &sg_data, req->assoclen);
+                                         &sg_data, areq_ctx->assoclen);
                if (rc)
                        goto aead_map_failure;
        }
@@ -1158,7 +1158,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
        }
 
-       size_to_map = req->cryptlen + req->assoclen;
+       size_to_map = req->cryptlen + areq_ctx->assoclen;
        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
                size_to_map += authsize;