fs/iomap.c
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
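
/*
 * Illustrative sketch, not part of the original file: a minimal iomap_ops
 * implementation for a hypothetical filesystem ("myfs") whose files are
 * each backed by a single contiguous extent.  All myfs_* names and the
 * MYFS_I() helper are invented for illustration.
 */
#if 0	/* example only */
static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/* Report one mapping covering the whole file; iomap_apply() trims it. */
	iomap->type = IOMAP_MAPPED;
	iomap->offset = 0;
	iomap->length = i_size_read(inode);
	iomap->blkno = MYFS_I(inode)->first_block;	/* hypothetical field */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static struct iomap_ops myfs_iomap_ops = {
	.iomap_begin	= myfs_iomap_begin,
	/* .iomap_end is optional and omitted here; nothing to release */
};
#endif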

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
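
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * ->write_iter method built on iomap_file_buffered_write().  The
 * myfs_iomap_ops table is the hypothetical one sketched above; the
 * locking and generic_write_checks()/generic_write_sync() calls follow
 * the usual pattern for buffered writes.
 */
#if 0	/* example only */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif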

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
				&page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		struct iomap_ops *ops)
{
	unsigned blocksize = (1 << inode->i_blkbits);
	unsigned off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
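
/*
 * Illustrative sketch, not part of the original file: zeroing the partial
 * tail block on truncate via iomap_truncate_page() (which in turn calls
 * iomap_zero_range()).  myfs_truncate() and myfs_iomap_ops are invented
 * for illustration.
 */
#if 0	/* example only */
static int myfs_truncate(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	/* Zero from newsize to the end of its block so stale data is not exposed. */
	error = iomap_truncate_page(inode, newsize, &did_zero, &myfs_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}
#endif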

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
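
/*
 * Illustrative sketch, not part of the original file: wiring
 * iomap_page_mkwrite() into a filesystem's vm_operations_struct so that
 * write faults on shared mappings go through the iomap machinery.  The
 * myfs_* names are invented for illustration.
 */
#if 0	/* example only */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return iomap_page_mkwrite(vma, vmf, &myfs_iomap_ops);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif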

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
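
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->fiemap inode operation delegating to iomap_fiemap().  myfs_fiemap()
 * and myfs_iomap_ops are invented for illustration.
 */
#if 0	/* example only */
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
}
#endif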

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned fs_block_size = (1 << inode->i_blkbits), pad;
	unsigned align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops,
		iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
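
/*
 * Illustrative sketch, not part of the original file: a ->read_iter method
 * routing O_DIRECT reads through iomap_dio_rw().  Note that iomap_dio_rw()
 * asserts that i_rwsem is held, so the caller must take it.  The myfs_*
 * names are invented for illustration; NULL is passed because this example
 * needs no end_io callback.
 */
#if 0	/* example only */
static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!(iocb->ki_flags & IOCB_DIRECT))
		return generic_file_read_iter(iocb, to);

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
	inode_unlock_shared(inode);
	return ret;
}
#endif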