#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                if (!__v.bv_len)                        \
                        continue;                       \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        if (likely(n)) {                                        \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                }                                               \
        }                                                       \
}

#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (unlikely(i->type & ITER_DISCARD)) {  \
                        skip += n;                              \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}
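
/*
 * Usage sketch for the iterate_* macros (illustrative only): the
 * callers below expand them with one STEP expression per segment
 * flavour.  For the user-space (iovec) flavour the STEP must evaluate
 * to the number of bytes it failed to process, which is how short
 * copies propagate; the kvec and bvec STEP values are ignored.  A
 * caller that merely walks the segments could look like:
 *
 *      size_t seen = 0;
 *      iterate_all_kinds(i, n, v,
 *              ({ seen += v.iov_len; 0; }),    // ITER_IOVEC step
 *              seen += v.bv_len,               // ITER_BVEC step
 *              seen += v.iov_len               // ITER_KVEC step
 *      )
 */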

static int copyout(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (access_ok(from, n)) {
                kasan_check_write(to, n);
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyout(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyout(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = copyout(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyout(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        might_fault();
        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = copyin(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = copyin(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = copyin(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = copyin(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        int idx = i->idx;
        int next = pipe->curbuf + pipe->nrbufs;
        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        size_t off;
        int idx;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        idx = i->idx;
        buf = &pipe->bufs[idx];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                idx = next_idx(idx, pipe);
                buf = &pipe->bufs[idx];
        }
        if (idx == pipe->curbuf && pipe->nrbufs)
                return 0;
        pipe->nrbufs++;
        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        buf->len = bytes;
        i->iov_offset = offset + bytes;
        i->idx = idx;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, up to a maximum of
 * @bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns 0 on success, or non-zero if the memory could not be accessed
 * (e.g. because the address is invalid).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
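
/*
 * Typical use, a sketch of the pattern in callers like
 * generic_perform_write(): fault the user pages in up front, before
 * any page locks are taken, so the subsequent atomic copy is unlikely
 * to hit the short-copy slow path:
 *
 *      if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *              status = -EFAULT;
 *              break;
 *      }
 *      ... lock the page, then iov_iter_copy_from_user_atomic() ...
 */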

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        direction &= READ | WRITE;

        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
                i->type = ITER_KVEC | direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = ITER_IOVEC | direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
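
/*
 * A minimal example of building and draining an ITER_IOVEC iterator
 * (the buffer names are illustrative):
 *
 *      struct iovec iov[2] = {
 *              { .iov_base = buf0, .iov_len = len0 },
 *              { .iov_base = buf1, .iov_len = len1 },
 *      };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, READ, iov, 2, len0 + len1);
 *      copied = copy_to_iter(kbuf, len0 + len1, &iter);
 *
 * READ means the iovecs are the destination of the transfer (they get
 * written to); WRITE means they are the source.
 */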

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
        size_t off = i->iov_offset;
        int idx = i->idx;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
                off = 0;
        }
        *idxp = idx;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t off;
        int idx;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &idx, &off);
        *idxp = idx;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[idx].len += size;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;
                pipe->nrbufs++;
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                left -= PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
        }
        i->count -= bytes;
        return bytes;
}

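/*
 * Note for the helper below: @off is the number of bytes of the stream
 * already checksummed before @from.  csum_block_add() uses it to
 * byte-swap @next when the chunk starts at an odd offset, so the
 * running sum comes out the same as checksumming the whole stream in
 * one pass.
 */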
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                __wsum *csum, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, r;
        size_t off = 0;
        __wsum sum = *csum;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &r);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), r = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
                char *p = kmap_atomic(pipe->bufs[idx].page);
                sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
                kunmap_atomic(p);
                i->idx = idx;
                i->iov_offset = r + chunk;
                n -= chunk;
                off += chunk;
                addr += chunk;
        }
        i->count -= bytes;
        *csum = sum;
        return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                kasan_check_read(from, n);
                n = copy_to_user_mcsafe((__force void *) to, from, n);
        }
        return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
                const char *from, size_t len)
{
        unsigned long ret;
        char *to;

        to = kmap_atomic(page);
        ret = memcpy_mcsafe(to + offset, from, len);
        kunmap_atomic(to);

        return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off, xfer = 0;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                unsigned long rem;

                rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
                                chunk);
                i->idx = idx;
                i->iov_offset = off + chunk - rem;
                xfer += chunk - rem;
                if (rem)
                        break;
                n -= chunk;
                addr += chunk;
        }
        i->count -= xfer;
        return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless/until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                ({
                rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                }),
                ({
                rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
                                v.iov_len);
                if (rem) {
                        curr_addr = (unsigned long) from;
                        bytes = curr_addr - s_addr - rem;
                        return bytes;
                }
                })
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
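
/*
 * A short return from _copy_to_iter_mcsafe() may mean the source hit
 * poisoned memory, not just that the destination faulted.  A sketch of
 * caller handling (the -EIO mapping is illustrative):
 *
 *      size_t done = _copy_to_iter_mcsafe(src, len, &iter);
 *      if (done != len)
 *              return done ? done : -EIO;
 */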

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        if (iter_is_iovec(i))
                might_fault();
        iterate_all_kinds(i, bytes, v, ({
                if (copyin((to += v.iov_len) - v.iov_len,
                                      v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It differs from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types, whereas _copy_from_iter_nocache() only
 * attempts to bypass the cache for the ITER_IOVEC case and on some
 * architectures may use instructions that strand dirty data in the
 * cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
                        v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
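
/*
 * Sketch of a pmem-style call site for the flushcache variant (names
 * are illustrative): the destination is persistent memory, so every
 * byte copied must end up flushed out of the CPU cache:
 *
 *      size_t copied = _copy_from_iter_flushcache(pmem_addr, len, &iter);
 *      ... no further cache flush of pmem_addr is needed for that range
 */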

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to look at the compound page order to
         * compute the page size.  However, we mostly deal with order-0
         * pages, so avoid a possible cache line miss for requests that
         * fit entirely within the first page (and thus within any order).
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
                return true;
        WARN_ON(1);
        return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (unlikely(iov_iter_is_discard(i)))
                return bytes;
        else if (likely(!iov_iter_is_pipe(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
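
/*
 * Example (illustrative): pushing one page of file data to whatever
 * the caller supplied, without caring which flavour of iterator it is:
 *
 *      size_t n = copy_page_to_iter(page, offset, PAGE_SIZE - offset, &iter);
 *      if (n < PAGE_SIZE - offset)
 *              ... short copy: the user buffer faulted or the pipe is full
 */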

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;

        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(!page_copy_sane(page, offset, bytes))) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (pipe->nrbufs) {
                size_t off = i->iov_offset;
                int idx = i->idx;
                int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
                if (off) {
                        pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
                        idx = next_idx(idx, pipe);
                        nrbufs++;
                }
                while (pipe->nrbufs > nrbufs) {
                        pipe_buf_release(pipe, &pipe->bufs[idx]);
                        idx = next_idx(idx, pipe);
                        pipe->nrbufs--;
                }
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                size_t off = i->iov_offset, left = size;
                int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
                i->idx = idx;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(iov_iter_is_pipe(i))) {
                pipe_advance(i, size);
                return;
        }
        if (unlikely(iov_iter_is_discard(i))) {
                i->count -= size;
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                int idx = i->idx;
                size_t off = i->iov_offset;
                while (1) {
                        size_t n = off - pipe->bufs[idx].offset;
                        if (unroll < n) {
                                off -= unroll;
                                break;
                        }
                        unroll -= n;
                        if (!unroll && idx == i->start_idx) {
                                off = 0;
                                break;
                        }
                        if (!idx--)
                                idx = pipe->buffers - 1;
                        off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
                }
                i->iov_offset = off;
                i->idx = idx;
                pipe_truncate(i);
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
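
/*
 * advance/revert pairing, as used by callers that may need to undo a
 * partially consumed iterator (do_transfer() is illustrative):
 *
 *      size_t before = iov_iter_count(&iter);
 *      ret = do_transfer(&iter);       // advances the iterator
 *      if (ret < 0)
 *              iov_iter_revert(&iter, before - iov_iter_count(&iter));
 */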

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        if (unlikely(iov_iter_is_discard(i)))
                return i->count;
        else if (iov_iter_is_bvec(i))
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_KVEC | (direction & (READ | WRITE));
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        i->type = ITER_BVEC | (direction & (READ | WRITE));
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = ITER_PIPE | READ;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
        i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        i->type = ITER_DISCARD | READ;
        i->count = count;
        i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
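
/*
 * Example (illustrative): skipping @count bytes of input by handing
 * the normal read path an iterator that throws the data away:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_discard(&iter, READ, count);
 *      ... pass &iter to the read path; everything copied into it is
 *      dropped, only iter.count moves
 */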

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i))) {
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return ~0U;
        }

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[idx].page);
                idx = next_idx(idx, pipe);
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned npages;
        size_t capacity;
        int idx;

        if (!maxsize)
                return 0;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        if (unlikely(iov_iter_is_discard(i)))
                return -EFAULT;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
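
/*
 * The caller owns a reference on every page returned.  A typical use
 * (sketch, error handling elided; the array size is arbitrary):
 *
 *      struct page *pages[16];
 *      size_t start;
 *      ssize_t n = iov_iter_get_pages(&iter, pages, maxsize, 16, &start);
 *
 *      if (n > 0) {
 *              int k, npages = DIV_ROUND_UP(n + start, PAGE_SIZE);
 *              ... do I/O against pages[0..npages - 1]; the data
 *              begins at offset start within pages[0] ...
 *              for (k = 0; k < npages; k++)
 *                      put_page(pages[k]);
 *      }
 */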
1312
1313 static struct page **get_pages_array(size_t n)
1314 {
1315         return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1316 }
1317
1318 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1319                    struct page ***pages, size_t maxsize,
1320                    size_t *start)
1321 {
1322         struct page **p;
1323         ssize_t n;
1324         int idx;
1325         int npages;
1326
1327         if (!maxsize)
1328                 return 0;
1329
1330         if (!sanity(i))
1331                 return -EFAULT;
1332
1333         data_start(i, &idx, start);
1334         /* some of this one + all after this one */
1335         npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
1336         n = npages * PAGE_SIZE - *start;
1337         if (maxsize > n)
1338                 maxsize = n;
1339         else
1340                 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1341         p = get_pages_array(npages);
1342         if (!p)
1343                 return -ENOMEM;
1344         n = __pipe_get_pages(i, maxsize, p, idx, start);
1345         if (n > 0)
1346                 *pages = p;
1347         else
1348                 kvfree(p);
1349         return n;
1350 }
1351
1352 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1353                    struct page ***pages, size_t maxsize,
1354                    size_t *start)
1355 {
1356         struct page **p;
1357
1358         if (maxsize > i->count)
1359                 maxsize = i->count;
1360
1361         if (unlikely(iov_iter_is_pipe(i)))
1362                 return pipe_get_pages_alloc(i, pages, maxsize, start);
1363         if (unlikely(iov_iter_is_discard(i)))
1364                 return -EFAULT;
1365
1366         iterate_all_kinds(i, maxsize, v, ({
1367                 unsigned long addr = (unsigned long)v.iov_base;
1368                 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1369                 int n;
1370                 int res;
1371
1372                 addr &= ~(PAGE_SIZE - 1);
1373                 n = DIV_ROUND_UP(len, PAGE_SIZE);
1374                 p = get_pages_array(n);
1375                 if (!p)
1376                         return -ENOMEM;
1377                 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
1378                 if (unlikely(res < 0)) {
1379                         kvfree(p);
1380                         return res;
1381                 }
1382                 *pages = p;
1383                 return (res == n ? len : res * PAGE_SIZE) - *start;
1384         0;}),({
1385                 /* can't be more than PAGE_SIZE */
1386                 *start = v.bv_offset;
1387                 *pages = p = get_pages_array(1);
1388                 if (!p)
1389                         return -ENOMEM;
1390                 get_page(*p = v.bv_page);
1391                 return v.bv_len;
1392         }),({
1393                 return -EFAULT;
1394         })
1395         )
1396         return 0;
1397 }
1398 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1399
1400 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1401                                struct iov_iter *i)
1402 {
1403         char *to = addr;
1404         __wsum sum, next;
1405         size_t off = 0;
1406         sum = *csum;
1407         if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1408                 WARN_ON(1);
1409                 return 0;
1410         }
1411         iterate_and_advance(i, bytes, v, ({
1412                 int err = 0;
1413                 next = csum_and_copy_from_user(v.iov_base,
1414                                                (to += v.iov_len) - v.iov_len,
1415                                                v.iov_len, 0, &err);
1416                 if (!err) {
1417                         sum = csum_block_add(sum, next, off);
1418                         off += v.iov_len;
1419                 }
1420                 err ? v.iov_len : 0;
1421         }), ({
1422                 char *p = kmap_atomic(v.bv_page);
1423                 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1424                                       p + v.bv_offset, v.bv_len,
1425                                       sum, off);
1426                 kunmap_atomic(p);
1427                 off += v.bv_len;
1428         }),({
1429                 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1430                                       v.iov_base, v.iov_len,
1431                                       sum, off);
1432                 off += v.iov_len;
1433         })
1434         )
1435         *csum = sum;
1436         return bytes;
1437 }
1438 EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
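
/*
 * Unlike the non-_full variant above, this helper is all-or-nothing from
 * the caller's point of view: on a fault it returns false with the
 * iterator unadvanced and *csum untouched (though @addr may have been
 * partially written), while success advances the iterator by the full
 * @bytes.
 */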

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
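
/*
 * @csump is void * rather than __wsum *, apparently so that this
 * function's type matches the same callback shape as
 * hash_and_copy_to_iter() below and the two can be passed through a
 * single function pointer.
 */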

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
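
/*
 * Calling-convention sketch, inferred from the body above: @hashp is a
 * live ahash request the caller has already initialized; this helper only
 * feeds update steps into it, so finalization stays with the caller:
 *
 *	crypto_ahash_init(req);
 *	while (data remains)
 *		hash_and_copy_to_iter(buf, len, req, iter);
 *	ahash_request_set_crypt(req, NULL, digest, 0);
 *	crypto_ahash_final(req);
 */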

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
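
/*
 * Typical use is sizing an allocation before walking the iterator, e.g.
 * capping a bio at BIO_MAX_PAGES the way the block layer does:
 *
 *	int n = iov_iter_npages(iter, BIO_MAX_PAGES);
 *	struct bio *bio = bio_alloc(GFP_KERNEL, n);
 */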

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
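
/*
 * The return value is the kmemdup()'ed segment array (NULL on failure),
 * which the caller must kfree() when done with the duplicate:
 *
 *	struct iov_iter copy;
 *	const void *dup = dup_iter(&copy, orig, GFP_KERNEL);
 *
 *	if (!dup)
 *		return -ENOMEM;
 *	... use the duplicate, possibly after *orig is gone ...
 *	kfree(dup);
 */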

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in the userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to a (usually small
 *     on-stack) kernel array.
 * @i: Pointer to the iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs
 * entries, then this function places %NULL in *@iov on return. Otherwise, a
 * new array will be allocated and the result placed in *@iov. This means
 * that the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or a negative error code on error.
 */
int import_iovec(int type, const struct iovec __user *uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
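
/*
 * Canonical usage, per the comment above: pass a small on-stack array and
 * unconditionally kfree() *iov afterwards, e.g.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use iter ...
 *	kfree(iov);	// NULL, hence a no-op, if the stack array was used
 */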

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user *uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
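
/*
 * Single-range convenience usage; note that @iov must stay alive for as
 * long as @i is in use, since the iterator points into it:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(READ, ubuf, len, &iov, &iter);
 *
 *	if (unlikely(ret))
 *		return ret;
 */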

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
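
/*
 * Caller sketch, with a made-up zero_chunk() callback for illustration:
 * @f sees each contiguous chunk as a kvec (bvec pages are kmap()ed for
 * it), the value from the last invocation is what the walk returns, and
 * user-space backed iterators get -EINVAL, since that constant is the
 * iovec step above.
 *
 *	static int zero_chunk(struct kvec *vec, void *context)
 *	{
 *		memset(vec->iov_base, 0, vec->iov_len);
 *		return 0;
 *	}
 *
 *	err = iov_iter_for_each_range(iter, bytes, zero_chunk, NULL);
 */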