Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 12 Mar 2019 20:43:42 +0000 (13:43 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 12 Mar 2019 20:43:42 +0000 (13:43 -0700)
Pull iov_iter updates from Al Viro:
 "A couple of iov_iter patches - Christoph's crapectomy (the last
  remaining user of iov_for_each() went away with lustre, IIRC) and
  Eric's optimization of sanity checks"

* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  iov_iter: optimize page_copy_sane()
  uio: remove the unused iov_for_each macro

.clang-format
include/linux/uio.h
lib/iov_iter.c

index 201a4f531b90a5ad360ac2f8b6cec374a7c05d44..f49620f506f17a95bda75dd4cbfd2a544ee0a8b4 100644 (file)
@@ -290,7 +290,6 @@ ForEachMacros:
   - 'idr_for_each_entry_ul'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
-  - 'iov_for_each'
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
index ecf584f6b82d051fd698ea60605afc3e814be269..87477e1640f9217223f7cbcde6b3fa416ef58ac5 100644 (file)
@@ -110,14 +110,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
        };
 }
 
-#define iov_for_each(iov, iter, start)                         \
-       if (iov_iter_type(start) == ITER_IOVEC ||               \
-           iov_iter_type(start) == ITER_KVEC)                  \
-       for (iter = (start);                                    \
-            (iter).count &&                                    \
-            ((iov = iov_iter_iovec(&(iter))), 1);              \
-            iov_iter_advance(&(iter), (iov).iov_len))
-
 size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
index be4bd627caf060cd89aa41ac88208946da568035..ea36dc355da131b4a45b71d8be6f1bc69a53e637 100644 (file)
@@ -861,8 +861,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-       struct page *head = compound_head(page);
-       size_t v = n + offset + page_address(page) - page_address(head);
+       struct page *head;
+       size_t v = n + offset;
+
+       /*
+        * The general case needs to access the page order in order
+        * to compute the page size.
+        * However, we mostly deal with order-0 pages and thus can
+        * avoid a possible cache line miss for requests that fit all
+        * page orders.
+        */
+       if (n <= v && v <= PAGE_SIZE)
+               return true;
+
+       head = compound_head(page);
+       v += (page - head) << PAGE_SHIFT;
 
        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
                return true;