ntfs_copy_from_user_iter(): don't bother with copying iov_iter
author Al Viro <viro@zeniv.linux.org.uk>
Mon, 31 May 2021 02:53:43 +0000 (22:53 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Wed, 2 Jun 2021 21:50:38 +0000 (17:50 -0400)
Advance the original, let the caller revert if it needs to.
Don't mess with iov_iter_single_seg_count() in the caller -
if we got a (non-zero) short copy, use the amount actually
copied for the next pass; if nothing got copied at all, limit
the next pass to the end of the current page.

Originally, fault-in read only the first iovec; back then it
made sense to limit the pass after a short copy to just that
one iovec.  These days that's no longer true.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
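
The net effect is a simpler contract between ntfs_copy_from_user_iter()
and its caller: the helper advances the caller's iov_iter by exactly what
it managed to copy, and the caller undoes that advance with
iov_iter_revert() whenever it has to retry or bail out.  A minimal sketch
of the pattern (condensed from the hunks below, not the verbatim
fs/ntfs/file.c code):

        /* helper side: copy into the page, advancing the caller's iterator */
        copied = iov_iter_copy_from_user_atomic(page, i, ofs, len);
        iov_iter_advance(i, copied);    /* may be less than len on a fault */

        /* caller side: on a short copy, undo the advance and retry smaller */
        if (copied < bytes) {
                iov_iter_revert(i, copied);
                if (copied)
                        bytes = copied;                  /* what did copy */
                else if (bytes > PAGE_SIZE - ofs)
                        bytes = PAGE_SIZE - ofs;         /* rest of the page */
                goto again;
        }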
fs/ntfs/file.c

index e5aab265dff197e7c7ed3512e866f2c9515c7c87..0666d4578137d732cfbd16b344d8fa62b1bdf7bc 100644
@@ -1684,20 +1684,19 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
 {
        struct page **last_page = pages + nr_pages;
        size_t total = 0;
-       struct iov_iter data = *i;
        unsigned len, copied;
 
        do {
                len = PAGE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+               copied = iov_iter_copy_from_user_atomic(*pages, i, ofs,
                                len);
+               iov_iter_advance(i, copied);
                total += copied;
                bytes -= copied;
                if (!bytes)
                        break;
-               iov_iter_advance(&data, copied);
                if (copied < len)
                        goto err;
                ofs = 0;
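
With this hunk applied, the copy loop no longer maintains a private
struct iov_iter: it copies into each page in turn and advances the
caller's iterator as it goes.  Reassembled from the context above (the
err: label is elided, and the loop bound is inferred from last_page):

        do {
                len = PAGE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
                /* atomic copy into the current page; may be short */
                copied = iov_iter_copy_from_user_atomic(*pages, i, ofs, len);
                /* advance the caller's iterator - no local copy to sync */
                iov_iter_advance(i, copied);
                total += copied;
                bytes -= copied;
                if (!bytes)
                        break;
                if (copied < len)
                        goto err;       /* caller reverts and retries */
                ofs = 0;
        } while (++pages < last_page);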
@@ -1866,34 +1865,24 @@ again:
                if (likely(copied == bytes)) {
                        status = ntfs_commit_pages_after_write(pages, do_pages,
                                        pos, bytes);
-                       if (!status)
-                               status = bytes;
                }
                do {
                        unlock_page(pages[--do_pages]);
                        put_page(pages[do_pages]);
                } while (do_pages);
-               if (unlikely(status < 0))
+               if (unlikely(status < 0)) {
+                       iov_iter_revert(i, copied);
                        break;
-               copied = status;
+               }
                cond_resched();
-               if (unlikely(!copied)) {
-                       size_t sc;
-
-                       /*
-                        * We failed to copy anything.  Fall back to single
-                        * segment length write.
-                        *
-                        * This is needed to avoid possible livelock in the
-                        * case that all segments in the iov cannot be copied
-                        * at once without a pagefault.
-                        */
-                       sc = iov_iter_single_seg_count(i);
-                       if (bytes > sc)
-                               bytes = sc;
+               if (unlikely(copied < bytes)) {
+                       iov_iter_revert(i, copied);
+                       if (copied)
+                               bytes = copied;
+                       else if (bytes > PAGE_SIZE - ofs)
+                               bytes = PAGE_SIZE - ofs;
                        goto again;
                }
-               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
                balance_dirty_pages_ratelimited(mapping);
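
For context, ntfs_perform_write() faults the user buffer in before each
pass, which is why retrying with a smaller bytes cannot livelock: the
retried range is known to be resident (or the fault-in fails and the
write stops).  A simplified sketch of the surrounding loop, assuming the
pre-pass fault-in uses iov_iter_fault_in_readable() as mainline did at
the time; unchanged details are elided:

        do {
                /* fault in up to bytes of the user buffer for this pass */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }
again:
                /* grab and lock the pages, then copy into them */
                copied = ntfs_copy_from_user_iter(pages + u, do_pages - u,
                                ofs, i, bytes);
                /* ... commit, unlock, and the revert/retry logic above ... */
                pos += copied;
                written += copied;
                balance_dirty_pages_ratelimited(mapping);
        } while (iov_iter_count(i));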