diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b8f77e83d456292abac70d8f89360a3697e8e700..99255d0c9e2ffab9bde0500b14ed446c5bce281d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -152,7 +152,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
 
        do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
 
-       /* FIXME do we want to detach for failed REQ_DISCARD?
+       /* FIXME do we want to detach for failed REQ_OP_DISCARD?
         * ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */
        if (peer_req->flags & EE_WAS_ERROR)
                __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
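
(The FIXME wording just tracks the block layer's v4.8 split of request
flags into operations: the REQ_DISCARD flag became the REQ_OP_DISCARD op.
The check the comment contemplates is already spelled out in its second
line; as a sketch, not detaching on a failed discard would read:

	/* condition taken verbatim from the FIXME above */
	if ((peer_req->flags & (EE_WAS_ERROR | EE_IS_TRIM)) == EE_WAS_ERROR)
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);

i.e. count the error against the disk only when the failed request was
not a discard/TRIM.)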
@@ -295,60 +295,61 @@ void drbd_request_endio(struct bio *bio)
                complete_master_bio(device, &m);
 }
 
-void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
 {
-       AHASH_REQUEST_ON_STACK(req, tfm);
-       struct scatterlist sg;
+       SHASH_DESC_ON_STACK(desc, tfm);
        struct page *page = peer_req->pages;
        struct page *tmp;
        unsigned len;
+       void *src;
 
-       ahash_request_set_tfm(req, tfm);
-       ahash_request_set_callback(req, 0, NULL, NULL);
+       desc->tfm = tfm;
+       desc->flags = 0;
 
-       sg_init_table(&sg, 1);
-       crypto_ahash_init(req);
+       crypto_shash_init(desc);
 
+       src = kmap_atomic(page);
        while ((tmp = page_chain_next(page))) {
                /* all but the last page will be fully used */
-               sg_set_page(&sg, page, PAGE_SIZE, 0);
-               ahash_request_set_crypt(req, &sg, NULL, sg.length);
-               crypto_ahash_update(req);
+               crypto_shash_update(desc, src, PAGE_SIZE);
+               kunmap_atomic(src);
                page = tmp;
+               src = kmap_atomic(page);
        }
        /* and now the last, possibly only partially used page */
        len = peer_req->i.size & (PAGE_SIZE - 1);
-       sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
-       ahash_request_set_crypt(req, &sg, digest, sg.length);
-       crypto_ahash_finup(req);
-       ahash_request_zero(req);
+       crypto_shash_update(desc, src, len ?: PAGE_SIZE);
+       kunmap_atomic(src);
+
+       crypto_shash_final(desc, digest);
+       shash_desc_zero(desc);
 }
 
-void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
 {
-       AHASH_REQUEST_ON_STACK(req, tfm);
-       struct scatterlist sg;
+       SHASH_DESC_ON_STACK(desc, tfm);
        struct bio_vec bvec;
        struct bvec_iter iter;
 
-       ahash_request_set_tfm(req, tfm);
-       ahash_request_set_callback(req, 0, NULL, NULL);
+       desc->tfm = tfm;
+       desc->flags = 0;
 
-       sg_init_table(&sg, 1);
-       crypto_ahash_init(req);
+       crypto_shash_init(desc);
 
        bio_for_each_segment(bvec, bio, iter) {
-               sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
-               ahash_request_set_crypt(req, &sg, NULL, sg.length);
-               crypto_ahash_update(req);
+               u8 *src;
+
+               src = kmap_atomic(bvec.bv_page);
+               crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
+               kunmap_atomic(src);
+
                /* REQ_OP_WRITE_SAME has only one segment,
                 * checksum the payload only once. */
                if (bio_op(bio) == REQ_OP_WRITE_SAME)
                        break;
        }
-       ahash_request_set_crypt(req, NULL, digest, 0);
-       crypto_ahash_final(req);
-       ahash_request_zero(req);
+       crypto_shash_final(desc, digest);
+       shash_desc_zero(desc);
 }
 
 /* MAYBE merge common code with w_e_end_ov_req */
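
The conversion above moves drbd_csum_ee() and drbd_csum_bio() from the
asynchronous ahash API to the synchronous shash API. shash works on
linearly mapped buffers instead of scatterlists, which is why each page
is now mapped with kmap_atomic() before crypto_shash_update() and
unmapped afterwards. A minimal sketch of the same init/update/final
pattern for a flat buffer, with hypothetical names (it assumes the
caller allocated tfm via crypto_alloc_shash() and that out holds
crypto_shash_digestsize(tfm) bytes; the desc->flags field still existed
in the v4.20-era API):

	#include <crypto/hash.h>

	static int example_shash_digest(struct crypto_shash *tfm,
					const void *data, unsigned int len,
					u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		desc->flags = 0;

		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, data, len) ?:
		      crypto_shash_final(desc, out);

		shash_desc_zero(desc);	/* wipe hash state left on the stack */
		return err;
	}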
@@ -367,7 +368,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
        if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
                goto out;
 
-       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+       digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (digest) {
                sector_t sector = peer_req->i.sector;
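
crypto_shash_digestsize() is the direct replacement for
crypto_ahash_digestsize(); both return the length in bytes of the final
digest, so the kmalloc() sizing at these call sites is unchanged. As a
usage sketch of the helper above (hypothetical names; GFP_NOIO because
this runs on the block I/O path, where reclaim must not issue new I/O):

	unsigned int digest_size = crypto_shash_digestsize(tfm);
	u8 *digest = kmalloc(digest_size, GFP_NOIO);

	if (digest) {
		example_shash_digest(tfm, data, len, digest);
		/* ... transmit or compare the digest ... */
		kfree(digest);
	}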
@@ -1205,7 +1206,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
                 * a real fix would be much more involved,
                 * introducing more locking mechanisms */
                if (peer_device->connection->csums_tfm) {
-                       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+                       digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
                        D_ASSERT(device, digest_size == di->digest_size);
                        digest = kmalloc(digest_size, GFP_NOIO);
                }
@@ -1255,7 +1256,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
        if (unlikely(cancel))
                goto out;
 
-       digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+       digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (!digest) {
                err = 1;        /* terminate the connection in case the allocation failed */
@@ -1327,7 +1328,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
        di = peer_req->digest;
 
        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-               digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+               digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
                digest = kmalloc(digest_size, GFP_NOIO);
                if (digest) {
                        drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);