fs/iomap.c
1 /*
2  * Copyright (C) 2010 Red Hat, Inc.
3  * Copyright (c) 2016 Christoph Hellwig.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #include <linux/module.h>
15 #include <linux/compiler.h>
16 #include <linux/fs.h>
17 #include <linux/iomap.h>
18 #include <linux/uaccess.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/pagemap.h>
23 #include <linux/file.h>
24 #include <linux/uio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/buffer_head.h>
27 #include <linux/task_io_accounting_ops.h>
28 #include <linux/dax.h>
29 #include <linux/sched/signal.h>
30
31 #include "internal.h"
32
33 /*
34  * Execute an iomap write on a segment of the mapping that spans a
35  * contiguous range of pages that have identical block mapping state.
36  *
37  * This avoids the need to map pages individually, do individual allocations
38  * for each page and, most importantly, avoids the need for filesystem-specific
39  * locking per page. Instead, all the operations are amortised over the entire
40  * range of pages. It is assumed that the filesystems will lock whatever
41  * resources they require in the iomap_begin call, and release them in the
42  * iomap_end call.
43  */
44 loff_t
45 iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
46                 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
47 {
48         struct iomap iomap = { 0 };
49         loff_t written = 0, ret;
50
51         /*
52          * Need to map a range from start position for length bytes. This can
53          * span multiple pages - it is only guaranteed to return a range of a
54          * single type of pages (e.g. all into a hole, all mapped or all
55          * unwritten). Failure at this point has nothing to undo.
56          *
57          * If allocation is required for this range, reserve the space now so
58          * that the allocation is guaranteed to succeed later on. Once we copy
59          * the data into the page cache pages, then we cannot fail otherwise we
60          * expose transient stale data. If the reserve fails, we can safely
61          * back out at this point as there is nothing to undo.
62          */
63         ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
64         if (ret)
65                 return ret;
66         if (WARN_ON(iomap.offset > pos))
67                 return -EIO;
68
69         /*
70          * Cut down the length to the one actually provided by the filesystem,
71          * as it might not be able to give us the whole size that we requested.
72          */
73         if (iomap.offset + iomap.length < pos + length)
74                 length = iomap.offset + iomap.length - pos;
75
76         /*
77          * Now that we have guaranteed that the space allocation will succeed,
78          * we can do the copy-in page by page without having to worry about
79          * failures exposing transient data.
80          */
81         written = actor(inode, pos, length, data, &iomap);
82
83         /*
84          * Now the data has been copied, commit the range we've copied.  This
85          * should not fail unless the filesystem has had a fatal error.
86          */
87         if (ops->iomap_end) {
88                 ret = ops->iomap_end(inode, pos, length,
89                                      written > 0 ? written : 0,
90                                      flags, &iomap);
91         }
92
93         return written ? written : ret;
94 }
95
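/*
 * Illustrative sketch only (not part of this file): a minimal iomap_ops
 * pair for a hypothetical "examplefs", showing the contract described
 * above.  examplefs_map_blocks() and examplefs_commit() are assumed
 * helpers, not real kernel API; real implementations such as
 * xfs_file_iomap_begin() are considerably more involved.
 */
#if 0
static int examplefs_iomap_begin(struct inode *inode, loff_t pos,
		loff_t length, unsigned flags, struct iomap *iomap)
{
	/*
	 * Take locks and reserve space here; once the actor starts
	 * copying data we must not fail in a way that exposes stale
	 * or transient data.
	 */
	return examplefs_map_blocks(inode, pos, length,
			(flags & IOMAP_WRITE) != 0, iomap);
}

static int examplefs_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	/* Release what iomap_begin took and trim unused reservation. */
	return examplefs_commit(inode, pos, length, written);
}

static const struct iomap_ops examplefs_iomap_ops = {
	.iomap_begin	= examplefs_iomap_begin,
	.iomap_end	= examplefs_iomap_end,
};
#endif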
96 static void
97 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
98 {
99         loff_t i_size = i_size_read(inode);
100
101         /*
102          * Only truncate newly allocated pages beyond EOF, even if the
103          * write started inside the existing inode size.
104          */
105         if (pos + len > i_size)
106                 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
107 }
108
109 static int
110 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
111                 struct page **pagep, struct iomap *iomap)
112 {
113         pgoff_t index = pos >> PAGE_SHIFT;
114         struct page *page;
115         int status = 0;
116
117         BUG_ON(pos + len > iomap->offset + iomap->length);
118
119         if (fatal_signal_pending(current))
120                 return -EINTR;
121
122         page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
123         if (!page)
124                 return -ENOMEM;
125
126         status = __block_write_begin_int(page, pos, len, NULL, iomap);
127         if (unlikely(status)) {
128                 unlock_page(page);
129                 put_page(page);
130                 page = NULL;
131
132                 iomap_write_failed(inode, pos, len);
133         }
134
135         *pagep = page;
136         return status;
137 }
138
139 static int
140 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
141                 unsigned copied, struct page *page)
142 {
143         int ret;
144
145         ret = generic_write_end(NULL, inode->i_mapping, pos, len,
146                         copied, page, NULL);
147         if (ret < len)
148                 iomap_write_failed(inode, pos, len);
149         return ret;
150 }
151
152 static loff_t
153 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
154                 struct iomap *iomap)
155 {
156         struct iov_iter *i = data;
157         long status = 0;
158         ssize_t written = 0;
159         unsigned int flags = AOP_FLAG_NOFS;
160
161         /*
162          * Copies from kernel address space cannot fail (NFSD is a big user).
163          */
164         if (!iter_is_iovec(i))
165                 flags |= AOP_FLAG_UNINTERRUPTIBLE;
166
167         do {
168                 struct page *page;
169                 unsigned long offset;   /* Offset into pagecache page */
170                 unsigned long bytes;    /* Bytes to write to page */
171                 size_t copied;          /* Bytes copied from user */
172
173                 offset = (pos & (PAGE_SIZE - 1));
174                 bytes = min_t(unsigned long, PAGE_SIZE - offset,
175                                                 iov_iter_count(i));
176 again:
177                 if (bytes > length)
178                         bytes = length;
179
180                 /*
181                  * Bring in the user page that we will copy from _first_.
182                  * Otherwise there's a nasty deadlock on copying from the
183                  * same page as we're writing to, without it being marked
184                  * up-to-date.
185                  *
186                  * Not only is this an optimisation, but it is also required
187                  * to check that the address is actually valid, when atomic
188                  * usercopies are used, below.
189                  */
190                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
191                         status = -EFAULT;
192                         break;
193                 }
194
195                 status = iomap_write_begin(inode, pos, bytes, flags, &page,
196                                 iomap);
197                 if (unlikely(status))
198                         break;
199
200                 if (mapping_writably_mapped(inode->i_mapping))
201                         flush_dcache_page(page);
202
203                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
204
205                 flush_dcache_page(page);
206
207                 status = iomap_write_end(inode, pos, bytes, copied, page);
208                 if (unlikely(status < 0))
209                         break;
210                 copied = status;
211
212                 cond_resched();
213
214                 iov_iter_advance(i, copied);
215                 if (unlikely(copied == 0)) {
216                         /*
217                          * If we were unable to copy any data at all, we must
218                          * fall back to a single segment length write.
219                          *
220                          * If we didn't fall back here, we could livelock
221                          * because not all segments in the iov can be copied at
222                          * once without a pagefault.
223                          */
224                         bytes = min_t(unsigned long, PAGE_SIZE - offset,
225                                                 iov_iter_single_seg_count(i));
226                         goto again;
227                 }
228                 pos += copied;
229                 written += copied;
230                 length -= copied;
231
232                 balance_dirty_pages_ratelimited(inode->i_mapping);
233         } while (iov_iter_count(i) && length);
234
235         return written ? written : status;
236 }
237
238 ssize_t
239 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
240                 const struct iomap_ops *ops)
241 {
242         struct inode *inode = iocb->ki_filp->f_mapping->host;
243         loff_t pos = iocb->ki_pos, ret = 0, written = 0;
244
245         while (iov_iter_count(iter)) {
246                 ret = iomap_apply(inode, pos, iov_iter_count(iter),
247                                 IOMAP_WRITE, ops, iter, iomap_write_actor);
248                 if (ret <= 0)
249                         break;
250                 pos += ret;
251                 written += ret;
252         }
253
254         return written ? written : ret;
255 }
256 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
257
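/*
 * Illustrative sketch only: a ->write_iter method built on
 * iomap_file_buffered_write().  The examplefs_iomap_ops sketched
 * earlier and the exact locking are assumptions; compare
 * xfs_file_buffered_aio_write() for a real caller.
 */
#if 0
static ssize_t examplefs_buffered_write(struct kiocb *iocb,
		struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&examplefs_iomap_ops);
	if (ret > 0)
		iocb->ki_pos += ret;
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif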
258 static struct page *
259 __iomap_read_page(struct inode *inode, loff_t offset)
260 {
261         struct address_space *mapping = inode->i_mapping;
262         struct page *page;
263
264         page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
265         if (IS_ERR(page))
266                 return page;
267         if (!PageUptodate(page)) {
268                 put_page(page);
269                 return ERR_PTR(-EIO);
270         }
271         return page;
272 }
273
274 static loff_t
275 iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
276                 struct iomap *iomap)
277 {
278         long status = 0;
279         ssize_t written = 0;
280
281         do {
282                 struct page *page, *rpage;
283                 unsigned long offset;   /* Offset into pagecache page */
284                 unsigned long bytes;    /* Bytes to write to page */
285
286                 offset = (pos & (PAGE_SIZE - 1));
287                 bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
288
289                 rpage = __iomap_read_page(inode, pos);
290                 if (IS_ERR(rpage))
291                         return PTR_ERR(rpage);
292
293                 status = iomap_write_begin(inode, pos, bytes,
294                                 AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
295                                 &page, iomap);
296                 put_page(rpage);
297                 if (unlikely(status))
298                         return status;
299
300                 WARN_ON_ONCE(!PageUptodate(page));
301
302                 status = iomap_write_end(inode, pos, bytes, bytes, page);
303                 if (unlikely(status <= 0)) {
304                         if (WARN_ON_ONCE(status == 0))
305                                 return -EIO;
306                         return status;
307                 }
308
309                 cond_resched();
310
311                 pos += status;
312                 written += status;
313                 length -= status;
314
315                 balance_dirty_pages_ratelimited(inode->i_mapping);
316         } while (length);
317
318         return written;
319 }
320
321 int
322 iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
323                 const struct iomap_ops *ops)
324 {
325         loff_t ret;
326
327         while (len) {
328                 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
329                                 iomap_dirty_actor);
330                 if (ret <= 0)
331                         return ret;
332                 pos += ret;
333                 len -= ret;
334         }
335
336         return 0;
337 }
338 EXPORT_SYMBOL_GPL(iomap_file_dirty);
339
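/*
 * Illustrative sketch only: a reflink-style caller can use
 * iomap_file_dirty() to push already-cached data back through the
 * write path over a newly shared range, so that the next writeback
 * rewrites (and thereby un-shares) those blocks.  The helper name and
 * examplefs_iomap_ops are assumptions.
 */
#if 0
static int examplefs_dirty_shared_range(struct inode *inode, loff_t pos,
		loff_t len)
{
	return iomap_file_dirty(inode, pos, len, &examplefs_iomap_ops);
}
#endif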
340 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
341                 unsigned bytes, struct iomap *iomap)
342 {
343         struct page *page;
344         int status;
345
346         status = iomap_write_begin(inode, pos, bytes,
347                         AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
348         if (status)
349                 return status;
350
351         zero_user(page, offset, bytes);
352         mark_page_accessed(page);
353
354         return iomap_write_end(inode, pos, bytes, bytes, page);
355 }
356
357 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
358                 struct iomap *iomap)
359 {
360         sector_t sector = iomap->blkno +
361                 (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
362
363         return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
364 }
365
366 static loff_t
367 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
368                 void *data, struct iomap *iomap)
369 {
370         bool *did_zero = data;
371         loff_t written = 0;
372         int status;
373
374         /* already zeroed?  we're done. */
375         if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
376                 return count;
377
378         do {
379                 unsigned offset, bytes;
380
381                 offset = pos & (PAGE_SIZE - 1); /* Within page */
382                 bytes = min_t(unsigned, PAGE_SIZE - offset, count);
383
384                 if (IS_DAX(inode))
385                         status = iomap_dax_zero(pos, offset, bytes, iomap);
386                 else
387                         status = iomap_zero(inode, pos, offset, bytes, iomap);
388                 if (status < 0)
389                         return status;
390
391                 pos += bytes;
392                 count -= bytes;
393                 written += bytes;
394                 if (did_zero)
395                         *did_zero = true;
396         } while (count > 0);
397
398         return written;
399 }
400
401 int
402 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
403                 const struct iomap_ops *ops)
404 {
405         loff_t ret;
406
407         while (len > 0) {
408                 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
409                                 ops, did_zero, iomap_zero_range_actor);
410                 if (ret <= 0)
411                         return ret;
412
413                 pos += ret;
414                 len -= ret;
415         }
416
417         return 0;
418 }
419 EXPORT_SYMBOL_GPL(iomap_zero_range);
420
421 int
422 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
423                 const struct iomap_ops *ops)
424 {
425         unsigned int blocksize = i_blocksize(inode);
426         unsigned int off = pos & (blocksize - 1);
427
428         /* Block boundary? Nothing to do */
429         if (!off)
430                 return 0;
431         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
432 }
433 EXPORT_SYMBOL_GPL(iomap_truncate_page);
434
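/*
 * Illustrative sketch only: how a setattr-style size change might use
 * iomap_zero_range() and iomap_truncate_page().  examplefs_iomap_ops
 * and the helper name are assumptions; compare xfs_setattr_size().
 */
#if 0
static int examplefs_zero_eof(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	bool did_zero = false;

	if (newsize > oldsize)
		/* Extending: zero from the old EOF up to the new one. */
		return iomap_zero_range(inode, oldsize, newsize - oldsize,
				&did_zero, &examplefs_iomap_ops);

	/* Shrinking: zero the tail of the new last block. */
	return iomap_truncate_page(inode, newsize, &did_zero,
			&examplefs_iomap_ops);
}
#endif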
435 static loff_t
436 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
437                 void *data, struct iomap *iomap)
438 {
439         struct page *page = data;
440         int ret;
441
442         ret = __block_write_begin_int(page, pos, length, NULL, iomap);
443         if (ret)
444                 return ret;
445
446         block_commit_write(page, 0, length);
447         return length;
448 }
449
450 int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
451 {
452         struct page *page = vmf->page;
453         struct inode *inode = file_inode(vmf->vma->vm_file);
454         unsigned long length;
455         loff_t offset, size;
456         ssize_t ret;
457
458         lock_page(page);
459         size = i_size_read(inode);
460         if ((page->mapping != inode->i_mapping) ||
461             (page_offset(page) > size)) {
462                 /* We overload EFAULT to mean page got truncated */
463                 ret = -EFAULT;
464                 goto out_unlock;
465         }
466
467         /* page is wholly or partially inside EOF */
468         if (((page->index + 1) << PAGE_SHIFT) > size)
469                 length = size & ~PAGE_MASK;
470         else
471                 length = PAGE_SIZE;
472
473         offset = page_offset(page);
474         while (length > 0) {
475                 ret = iomap_apply(inode, offset, length,
476                                 IOMAP_WRITE | IOMAP_FAULT, ops, page,
477                                 iomap_page_mkwrite_actor);
478                 if (unlikely(ret <= 0))
479                         goto out_unlock;
480                 offset += ret;
481                 length -= ret;
482         }
483
484         set_page_dirty(page);
485         wait_for_stable_page(page);
486         return 0;
487 out_unlock:
488         unlock_page(page);
489         return ret;
490 }
491 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
492
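/*
 * Illustrative sketch only: wiring iomap_page_mkwrite() into a
 * ->page_mkwrite handler.  examplefs_iomap_ops is an assumption;
 * block_page_mkwrite_return() maps the 0/-errno result to a
 * VM_FAULT_* code.
 */
#if 0
static int examplefs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &examplefs_iomap_ops);
	sb_end_pagefault(inode->i_sb);

	return block_page_mkwrite_return(ret);
}
#endif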
493 struct fiemap_ctx {
494         struct fiemap_extent_info *fi;
495         struct iomap prev;
496 };
497
498 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
499                 struct iomap *iomap, u32 flags)
500 {
501         switch (iomap->type) {
502         case IOMAP_HOLE:
503                 /* skip holes */
504                 return 0;
505         case IOMAP_DELALLOC:
506                 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
507                 break;
508         case IOMAP_UNWRITTEN:
509                 flags |= FIEMAP_EXTENT_UNWRITTEN;
510                 break;
511         case IOMAP_MAPPED:
512                 break;
513         }
514
515         if (iomap->flags & IOMAP_F_MERGED)
516                 flags |= FIEMAP_EXTENT_MERGED;
517         if (iomap->flags & IOMAP_F_SHARED)
518                 flags |= FIEMAP_EXTENT_SHARED;
519
520         return fiemap_fill_next_extent(fi, iomap->offset,
521                         iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
522                         iomap->length, flags);
523
524 }
525
526 static loff_t
527 iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
528                 struct iomap *iomap)
529 {
530         struct fiemap_ctx *ctx = data;
531         loff_t ret = length;
532
533         if (iomap->type == IOMAP_HOLE)
534                 return length;
535
536         ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
537         ctx->prev = *iomap;
538         switch (ret) {
539         case 0:         /* success */
540                 return length;
541         case 1:         /* extent array full */
542                 return 0;
543         default:
544                 return ret;
545         }
546 }
547
548 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
549                 loff_t start, loff_t len, const struct iomap_ops *ops)
550 {
551         struct fiemap_ctx ctx;
552         loff_t ret;
553
554         memset(&ctx, 0, sizeof(ctx));
555         ctx.fi = fi;
556         ctx.prev.type = IOMAP_HOLE;
557
558         ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
559         if (ret)
560                 return ret;
561
562         if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
563                 ret = filemap_write_and_wait(inode->i_mapping);
564                 if (ret)
565                         return ret;
566         }
567
568         while (len > 0) {
569                 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
570                                 iomap_fiemap_actor);
571                 /* inode with no (attribute) mapping will give ENOENT */
572                 if (ret == -ENOENT)
573                         break;
574                 if (ret < 0)
575                         return ret;
576                 if (ret == 0)
577                         break;
578
579                 start += ret;
580                 len -= ret;
581         }
582
583         if (ctx.prev.type != IOMAP_HOLE) {
584                 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
585                 if (ret < 0)
586                         return ret;
587         }
588
589         return 0;
590 }
591 EXPORT_SYMBOL_GPL(iomap_fiemap);
592
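/*
 * Illustrative sketch only: a ->fiemap implementation that simply
 * forwards to iomap_fiemap().  examplefs_iomap_ops is an assumption.
 */
#if 0
static int examplefs_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len,
			&examplefs_iomap_ops);
}
#endif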
593 /*
594  * Private flags for iomap_dio, must not overlap with the public ones in
595  * iomap.h:
596  */
597 #define IOMAP_DIO_WRITE         (1 << 30)
598 #define IOMAP_DIO_DIRTY         (1 << 31)
599
600 struct iomap_dio {
601         struct kiocb            *iocb;
602         iomap_dio_end_io_t      *end_io;
603         loff_t                  i_size;
604         loff_t                  size;
605         atomic_t                ref;
606         unsigned                flags;
607         int                     error;
608
609         union {
610                 /* used during submission and for synchronous completion: */
611                 struct {
612                         struct iov_iter         *iter;
613                         struct task_struct      *waiter;
614                         struct request_queue    *last_queue;
615                         blk_qc_t                cookie;
616                 } submit;
617
618                 /* used for aio completion: */
619                 struct {
620                         struct work_struct      work;
621                 } aio;
622         };
623 };
624
625 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
626 {
627         struct kiocb *iocb = dio->iocb;
628         ssize_t ret;
629
630         if (dio->end_io) {
631                 ret = dio->end_io(iocb,
632                                 dio->error ? dio->error : dio->size,
633                                 dio->flags);
634         } else {
635                 ret = dio->error;
636         }
637
638         if (likely(!ret)) {
639                 ret = dio->size;
640                 /* check for short read */
641                 if (iocb->ki_pos + ret > dio->i_size &&
642                     !(dio->flags & IOMAP_DIO_WRITE))
643                         ret = dio->i_size - iocb->ki_pos;
644                 iocb->ki_pos += ret;
645         }
646
647         inode_dio_end(file_inode(iocb->ki_filp));
648         kfree(dio);
649
650         return ret;
651 }
652
653 static void iomap_dio_complete_work(struct work_struct *work)
654 {
655         struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
656         struct kiocb *iocb = dio->iocb;
657         bool is_write = (dio->flags & IOMAP_DIO_WRITE);
658         ssize_t ret;
659
660         ret = iomap_dio_complete(dio);
661         if (is_write && ret > 0)
662                 ret = generic_write_sync(iocb, ret);
663         iocb->ki_complete(iocb, ret, 0);
664 }
665
666 /*
667  * Set an error in the dio if none is set yet.  We have to use cmpxchg
668  * as the submission context and the completion context(s) can race to
669  * update the error.
670  */
671 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
672 {
673         cmpxchg(&dio->error, 0, ret);
674 }
675
676 static void iomap_dio_bio_end_io(struct bio *bio)
677 {
678         struct iomap_dio *dio = bio->bi_private;
679         bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
680
681         if (bio->bi_error)
682                 iomap_dio_set_error(dio, bio->bi_error);
683
684         if (atomic_dec_and_test(&dio->ref)) {
685                 if (is_sync_kiocb(dio->iocb)) {
686                         struct task_struct *waiter = dio->submit.waiter;
687
688                         WRITE_ONCE(dio->submit.waiter, NULL);
689                         wake_up_process(waiter);
690                 } else if (dio->flags & IOMAP_DIO_WRITE) {
691                         struct inode *inode = file_inode(dio->iocb->ki_filp);
692
693                         INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
694                         queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
695                 } else {
696                         iomap_dio_complete_work(&dio->aio.work);
697                 }
698         }
699
700         if (should_dirty) {
701                 bio_check_pages_dirty(bio);
702         } else {
703                 struct bio_vec *bvec;
704                 int i;
705
706                 bio_for_each_segment_all(bvec, bio, i)
707                         put_page(bvec->bv_page);
708                 bio_put(bio);
709         }
710 }
711
712 static blk_qc_t
713 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
714                 unsigned len)
715 {
716         struct page *page = ZERO_PAGE(0);
717         struct bio *bio;
718
719         bio = bio_alloc(GFP_KERNEL, 1);
720         bio->bi_bdev = iomap->bdev;
721         bio->bi_iter.bi_sector =
722                 iomap->blkno + ((pos - iomap->offset) >> 9);
723         bio->bi_private = dio;
724         bio->bi_end_io = iomap_dio_bio_end_io;
725
726         get_page(page);
727         if (bio_add_page(bio, page, len, 0) != len)
728                 BUG();
729         bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
730
731         atomic_inc(&dio->ref);
732         return submit_bio(bio);
733 }
734
735 static loff_t
736 iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
737                 void *data, struct iomap *iomap)
738 {
739         struct iomap_dio *dio = data;
740         unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
741         unsigned int fs_block_size = i_blocksize(inode), pad;
742         unsigned int align = iov_iter_alignment(dio->submit.iter);
743         struct iov_iter iter;
744         struct bio *bio;
745         bool need_zeroout = false;
746         int nr_pages, ret;
747
748         if ((pos | length | align) & ((1 << blkbits) - 1))
749                 return -EINVAL;
750
751         switch (iomap->type) {
752         case IOMAP_HOLE:
753                 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
754                         return -EIO;
755                 /*FALLTHRU*/
756         case IOMAP_UNWRITTEN:
757                 if (!(dio->flags & IOMAP_DIO_WRITE)) {
758                         iov_iter_zero(length, dio->submit.iter);
759                         dio->size += length;
760                         return length;
761                 }
762                 dio->flags |= IOMAP_DIO_UNWRITTEN;
763                 need_zeroout = true;
764                 break;
765         case IOMAP_MAPPED:
766                 if (iomap->flags & IOMAP_F_SHARED)
767                         dio->flags |= IOMAP_DIO_COW;
768                 if (iomap->flags & IOMAP_F_NEW)
769                         need_zeroout = true;
770                 break;
771         default:
772                 WARN_ON_ONCE(1);
773                 return -EIO;
774         }
775
776         /*
777          * Operate on a partial iter trimmed to the extent we were called for.
778          * We'll update the iter in the dio once we're done with this extent.
779          */
780         iter = *dio->submit.iter;
781         iov_iter_truncate(&iter, length);
782
783         nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
784         if (nr_pages <= 0)
785                 return nr_pages;
786
787         if (need_zeroout) {
788                 /* zero out from the start of the block to the write offset */
789                 pad = pos & (fs_block_size - 1);
790                 if (pad)
791                         iomap_dio_zero(dio, iomap, pos - pad, pad);
792         }
793
794         do {
795                 if (dio->error)
796                         return 0;
797
798                 bio = bio_alloc(GFP_KERNEL, nr_pages);
799                 bio->bi_bdev = iomap->bdev;
800                 bio->bi_iter.bi_sector =
801                         iomap->blkno + ((pos - iomap->offset) >> 9);
802                 bio->bi_private = dio;
803                 bio->bi_end_io = iomap_dio_bio_end_io;
804
805                 ret = bio_iov_iter_get_pages(bio, &iter);
806                 if (unlikely(ret)) {
807                         bio_put(bio);
808                         return ret;
809                 }
810
811                 if (dio->flags & IOMAP_DIO_WRITE) {
812                         bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
813                         task_io_account_write(bio->bi_iter.bi_size);
814                 } else {
815                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
816                         if (dio->flags & IOMAP_DIO_DIRTY)
817                                 bio_set_pages_dirty(bio);
818                 }
819
820                 dio->size += bio->bi_iter.bi_size;
821                 pos += bio->bi_iter.bi_size;
822
823                 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
824
825                 atomic_inc(&dio->ref);
826
827                 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
828                 dio->submit.cookie = submit_bio(bio);
829         } while (nr_pages);
830
831         if (need_zeroout) {
832                 /* zero out from the end of the write to the end of the block */
833                 pad = pos & (fs_block_size - 1);
834                 if (pad)
835                         iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
836         }
837
838         iov_iter_advance(dio->submit.iter, length);
839         return length;
840 }
841
842 ssize_t
843 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
844                 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
845 {
846         struct address_space *mapping = iocb->ki_filp->f_mapping;
847         struct inode *inode = file_inode(iocb->ki_filp);
848         size_t count = iov_iter_count(iter);
849         loff_t pos = iocb->ki_pos, start = pos;
850         loff_t end = iocb->ki_pos + count - 1, ret = 0;
851         unsigned int flags = IOMAP_DIRECT;
852         struct blk_plug plug;
853         struct iomap_dio *dio;
854
855         lockdep_assert_held(&inode->i_rwsem);
856
857         if (!count)
858                 return 0;
859
860         dio = kmalloc(sizeof(*dio), GFP_KERNEL);
861         if (!dio)
862                 return -ENOMEM;
863
864         dio->iocb = iocb;
865         atomic_set(&dio->ref, 1);
866         dio->size = 0;
867         dio->i_size = i_size_read(inode);
868         dio->end_io = end_io;
869         dio->error = 0;
870         dio->flags = 0;
871
872         dio->submit.iter = iter;
873         if (is_sync_kiocb(iocb)) {
874                 dio->submit.waiter = current;
875                 dio->submit.cookie = BLK_QC_T_NONE;
876                 dio->submit.last_queue = NULL;
877         }
878
879         if (iov_iter_rw(iter) == READ) {
880                 if (pos >= dio->i_size)
881                         goto out_free_dio;
882
883                 if (iter->type == ITER_IOVEC)
884                         dio->flags |= IOMAP_DIO_DIRTY;
885         } else {
886                 dio->flags |= IOMAP_DIO_WRITE;
887                 flags |= IOMAP_WRITE;
888         }
889
890         if (mapping->nrpages) {
891                 ret = filemap_write_and_wait_range(mapping, start, end);
892                 if (ret)
893                         goto out_free_dio;
894
895                 ret = invalidate_inode_pages2_range(mapping,
896                                 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
897                 WARN_ON_ONCE(ret);
898                 ret = 0;
899         }
900
901         inode_dio_begin(inode);
902
903         blk_start_plug(&plug);
904         do {
905                 ret = iomap_apply(inode, pos, count, flags, ops, dio,
906                                 iomap_dio_actor);
907                 if (ret <= 0) {
908                         /* magic error code to fall back to buffered I/O */
909                         if (ret == -ENOTBLK)
910                                 ret = 0;
911                         break;
912                 }
913                 pos += ret;
914         } while ((count = iov_iter_count(iter)) > 0);
915         blk_finish_plug(&plug);
916
917         if (ret < 0)
918                 iomap_dio_set_error(dio, ret);
919
920         if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
921                         !inode->i_sb->s_dio_done_wq) {
922                 ret = sb_init_dio_done_wq(inode->i_sb);
923                 if (ret < 0)
924                         iomap_dio_set_error(dio, ret);
925         }
926
927         if (!atomic_dec_and_test(&dio->ref)) {
928                 if (!is_sync_kiocb(iocb))
929                         return -EIOCBQUEUED;
930
931                 for (;;) {
932                         set_current_state(TASK_UNINTERRUPTIBLE);
933                         if (!READ_ONCE(dio->submit.waiter))
934                                 break;
935
936                         if (!(iocb->ki_flags & IOCB_HIPRI) ||
937                             !dio->submit.last_queue ||
938                             !blk_mq_poll(dio->submit.last_queue,
939                                          dio->submit.cookie))
940                                 io_schedule();
941                 }
942                 __set_current_state(TASK_RUNNING);
943         }
944
945         ret = iomap_dio_complete(dio);
946
947         /*
948          * Try again to invalidate clean pages which might have been cached by
949          * non-direct readahead, or faulted in by get_user_pages() if the source
950          * of the write was an mmap'ed region of the file we're writing.  Either
951          * one is a pretty crazy thing to do, so we don't support it 100%.  If
952          * this invalidation fails, tough, the write still worked...
953          */
954         if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
955                 int err = invalidate_inode_pages2_range(mapping,
956                                 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
957                 WARN_ON_ONCE(err);
958         }
959
960         return ret;
961
962 out_free_dio:
963         kfree(dio);
964         return ret;
965 }
966 EXPORT_SYMBOL_GPL(iomap_dio_rw);
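/*
 * Illustrative sketch only: a direct-read path built on iomap_dio_rw().
 * examplefs_iomap_ops is an assumption and no end_io callback is
 * supplied; note that the caller must hold i_rwsem, as asserted above.
 */
#if 0
static ssize_t examplefs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &examplefs_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}
#endif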