crypto: caam - fix DMA API mapping leak in ablkcipher code
author		Horia Geantă <horia.geanta@nxp.com>
		Wed, 9 Nov 2016 08:46:20 +0000 (10:46 +0200)
committer	Herbert Xu <herbert@gondor.apana.org.au>
		Sun, 13 Nov 2016 09:45:13 +0000 (17:45 +0800)
ablkcipher_edesc_alloc() and ablkcipher_giv_edesc_alloc() don't
free / unmap resources on the error path:
- dma_map_sg() could fail, thus make sure the return value is checked
- unmap DMA mappings in case of error (see the sketch below)
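
A minimal sketch of the pattern both functions now follow (the helper
name ablkcipher_map_src_dst() is hypothetical and shown only to
illustrate the error path; the real changes live in
ablkcipher_edesc_alloc() and ablkcipher_giv_edesc_alloc()):

	static int ablkcipher_map_src_dst(struct device *jrdev,
					  struct scatterlist *src, int src_nents,
					  struct scatterlist *dst, int dst_nents)
	{
		int sgc;

		if (src == dst) {
			/* single bidirectional mapping; nothing to undo on failure */
			sgc = dma_map_sg(jrdev, src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL);
			return sgc ? 0 : -ENOMEM;
		}

		sgc = dma_map_sg(jrdev, src, src_nents ? : 1, DMA_TO_DEVICE);
		if (unlikely(!sgc))
			return -ENOMEM;

		sgc = dma_map_sg(jrdev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			/* undo the source mapping so it doesn't leak */
			dma_unmap_sg(jrdev, src, src_nents ? : 1, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		return 0;
	}

Later failures (IV mapping, extended descriptor allocation, S/G table
mapping) call caam_unmap() to release whatever has been mapped so far
before returning ERR_PTR(-ENOMEM).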

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/caam/caamalg.c

index 34dfcdb7ff84aa8605b724b6e1ce0231f5dac052..8db54b090d393c477257fb7786cb3a38ebf58889 100644
@@ -2600,16 +2600,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_BIDIRECTIONAL);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
        } else {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_TO_DEVICE);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+
                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
                                 DMA_FROM_DEVICE);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map destination\n");
+                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+                                    DMA_TO_DEVICE);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2629,6 +2646,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2655,6 +2674,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
+               kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2776,11 +2798,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_BIDIRECTIONAL);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
        } else {
                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
                                 DMA_TO_DEVICE);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map source\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+
                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
                                 DMA_FROM_DEVICE);
+               if (unlikely(!sgc)) {
+                       dev_err(jrdev, "unable to map destination\n");
+                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+                                    DMA_TO_DEVICE);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        /*
@@ -2790,6 +2827,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+                          0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2805,6 +2844,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
                        GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -2832,6 +2873,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+                          iv_dma, ivsize, 0, 0);
+               kfree(edesc);
                return ERR_PTR(-ENOMEM);
        }
        edesc->iv_dma = iv_dma;