/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystem will lock
 * whatever resources it requires in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
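
/*
 * A minimal sketch of the contract above, for illustration only; the myfs_*
 * names are hypothetical and not part of this file.  ->iomap_begin locks and
 * reserves whatever it needs and describes one contiguous extent in *iomap;
 * the actor consumes as much of that extent as it can; ->iomap_end releases
 * whatever ->iomap_begin took.
 *
 *	static int myfs_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap)
 *	{
 *		iomap->offset = pos;
 *		iomap->length = length;		// may be shortened by the fs
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		iomap->type = IOMAP_HOLE;	// or IOMAP_MAPPED with ->addr set
 *		iomap->addr = IOMAP_NULL_ADDR;
 *		return 0;
 *	}
 *
 *	static const struct iomap_ops myfs_iomap_ops = {
 *		.iomap_begin	= myfs_iomap_begin,
 *	};
 *
 * ->iomap_end is optional, as the NULL check in iomap_apply() above shows.
 */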

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
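
/*
 * Illustrative caller (hypothetical myfs_* names, not part of this file):
 * a filesystem typically invokes this from its ->write_iter method with the
 * inode locked, roughly along these lines:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */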

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
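
/*
 * Illustrative note: iomap_file_dirty() is for callers that have changed the
 * block mapping underneath clean pagecache (a reflink-style remap, say) and
 * need the cached data redirtied so writeback pushes it through the new
 * mapping.  A hypothetical caller would simply do:
 *
 *	ret = iomap_file_dirty(inode, pos, len, &myfs_iomap_ops);
 */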

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = (iomap->addr +
			   (pos & PAGE_MASK) - iomap->offset) >> 9;

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
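
/*
 * Illustrative use (hypothetical names): a filesystem shrinking a file would
 * zero the tail of the new last block before publishing the new size, e.g.
 * from its truncate/setattr path:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 */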

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
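
/*
 * Illustrative wiring (hypothetical myfs_* names): filesystems expose this
 * through their vm_operations_struct, typically bracketed by the pagefault
 * freeze protection:
 *
 *	static int myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */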

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;
	if (iomap->flags & IOMAP_F_DATA_INLINE)
		flags |= FIEMAP_EXTENT_DATA_INLINE;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
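
/*
 * Illustrative wiring (hypothetical names): hooked up via the ->fiemap inode
 * operation, which already works in byte offsets and lengths:
 *
 *	static int myfs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&myfs_iomap_ops);
 *	}
 */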

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
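
/*
 * Illustrative wiring (hypothetical names): both seek helpers slot into
 * ->llseek, with the ordinary whence values left to generic_file_llseek():
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */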

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector =
		(iomap->addr + pos - iomap->offset) >> 9;
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector =
			(iomap->addr + pos - iomap->offset) >> 9;
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(n);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
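
/*
 * Illustrative caller (hypothetical myfs_* names): filesystems call this from
 * their ->read_iter/->write_iter paths for IOCB_DIRECT requests, holding
 * i_rwsem as asserted above; end_io is optional and is where a filesystem
 * would, for example, convert unwritten extents after a write:
 *
 *	static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */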