/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
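
/*
 * Note for callers: an actor consumes one mapping at a time.  It is handed
 * the range [pos, pos + length) together with the iomap describing it, and
 * returns the number of bytes it processed, or a negative errno.  A minimal
 * sketch of the shape of an actor (illustrative only; foo_actor and
 * do_something_with are hypothetical, not part of this file):
 *
 *	static loff_t
 *	foo_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 *			struct iomap *iomap)
 *	{
 *		do_something_with(iomap, pos, length, data);
 *		return length;
 *	}
 */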

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
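
/*
 * Example caller (illustrative sketch only; foo_iomap_ops and the foo_*
 * names are hypothetical): a filesystem would typically invoke
 * iomap_file_buffered_write() from its ->write_iter method with the inode
 * lock held, roughly like:
 *
 *	static ssize_t
 *	foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&foo_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */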

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
				&page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
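
/*
 * Usage note (illustrative, not taken from a specific caller): a filesystem
 * that needs to force a range of cached data to be written out again, for
 * instance after changing the underlying block mapping, might call
 *
 *	ret = iomap_file_dirty(inode, pos, len, &foo_iomap_ops);
 *
 * where foo_iomap_ops stands in for the filesystem's own iomap_ops.  Each
 * page in the range is read into the page cache (if not already up to date)
 * and redirtied via iomap_write_begin/iomap_write_end.
 */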

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
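
/*
 * Usage note (illustrative): iomap_zero_range() is what a filesystem would
 * call to zero a sub-range of a file, e.g. when extending i_size in its
 * setattr path so that stale data in the old EOF block is not exposed:
 *
 *	bool did_zero = false;
 *
 *	error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *			&did_zero, &foo_iomap_ops);
 *
 * Again, foo_iomap_ops stands in for the filesystem's own iomap_ops.
 */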

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
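
/*
 * Usage note (illustrative): iomap_truncate_page() zeroes the tail of the
 * block containing the new EOF, so a truncate implementation would call it
 * with the new size before shrinking the file, roughly:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&foo_iomap_ops);
 *
 * If pos already sits on a block boundary this is a no-op.
 */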

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
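
/*
 * Example caller (illustrative sketch; the foo_* names are hypothetical):
 * a filesystem would hook iomap_page_mkwrite() into its vm_operations_struct
 * ->page_mkwrite handler, bracketed by the pagefault freeze protection:
 *
 *	static int foo_filemap_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &foo_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(ret);
 *	}
 */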

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
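
/*
 * Example caller (illustrative; foo_iomap_ops is hypothetical): wiring this
 * up as an inode operation is a one-liner:
 *
 *	static int
 *	foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 *			u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len, &foo_iomap_ops);
 *	}
 */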

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
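
/*
 * Example caller (illustrative sketch only; the foo_* names are
 * hypothetical): a filesystem would call iomap_dio_rw() from its
 * ->read_iter/->write_iter paths with the inode's i_rwsem held, e.g. for
 * a direct read:
 *
 *	static ssize_t
 *	foo_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * The optional end_io callback (NULL here) gives the filesystem a hook to
 * run completion work such as unwritten extent conversion.
 */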