/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}
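
/*
 * Worked example (illustrative, not from the original source): with a
 * 4096-byte block size, blockmask is 0xfff.  A 4096-byte write at pos 8192
 * from a 4096-byte-aligned buffer keeps (pos | iov_iter_alignment()) clear
 * under the mask, so ext4_unaligned_aio() returns 0.  A write at pos 2048,
 * or one whose iovec base or length is not a multiple of the block size,
 * returns 1 and must be serialized against other in-flight AIO.
 */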

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct blk_plug plug;
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO writes must be serialized against each
         * other: zeroing of partial blocks by two competing unaligned
         * AIOs can result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
                        ret = -EFBIG;
                        goto out;
                }
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
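
        /*
         * Illustrative numbers (assuming 4 KiB blocks and 4-byte block
         * pointers): an indirect-mapped file can address roughly
         * 12 + 2^10 + 2^20 + 2^30 blocks, i.e. about 4 TiB, well below
         * the extent-mapped s_maxbytes, so writes at or past
         * s_bitmap_maxbytes fail with EFBIG and writes that cross the
         * limit are shortened to fit.
         */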

        iocb->private = &overwrite;
        if (o_direct) {
                size_t length = iov_iter_count(from);
                loff_t pos = iocb->ki_pos;
                blk_start_plug(&plug);

                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the blocks have
                         * been preallocated, whether or not they are
                         * initialized.  To exclude unwritten extents we
                         * also need to check m_flags: only initialized
                         * (written) extents have EXT4_MAP_MAPPED set,
                         * both when the lookup is satisfied from the
                         * extent cache and when it does a real lookup.
                         */
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }
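
        /*
         * Note (illustrative): the overwrite hint travels to the direct
         * IO path through iocb->private, set above.  A pure overwrite
         * needs no block allocation or unwritten-extent conversion,
         * which is what allows the DIO code to relax its locking.
         */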

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        if (o_direct)
                blk_finish_plug(&plug);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                                EXT4_DATA_TRANS_BLOCKS(sb));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else
                result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}
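
/*
 * Note (illustrative): write faults follow a fixed pattern here: take
 * freeze protection (sb_start_pagefault()), update the file time, take
 * i_mmap_sem shared to exclude truncate, then open a journal handle so
 * any block allocation performed by the fault has a transaction to join.
 * Read faults only need the truncate protection.
 */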

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                ext4_chunk_trans_blocks(inode,
                                                        PMD_SIZE / PAGE_SIZE));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else
                result = __dax_pmd_fault(vma, addr, pmd, flags,
                                ext4_dax_mmap_get_block, NULL);

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings.  As in the ext4_dax_fault()
 * handler, we check for races against truncate.  Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called is finished by now, so if it covered part of the file we
 * are working on, our pte will have been unmapped and the pte_same() check
 * in wp_pfn_shared() will fail.  The fault then gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        loff_t size;
        int ret;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vma, vmf);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}
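
/*
 * Note (illustrative): re-checking i_size while holding i_mmap_sem is
 * what closes the race here: a truncate that completed before we took
 * the lock is caught by the size check (SIGBUS), while one that starts
 * later must wait for i_mmap_sem and will unmap our pte when it runs.
 */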

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (ext4_encrypted_inode(inode)) {
                int err = ext4_get_encryption_info(inode);
                if (err)
                        return 0;
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}
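
/*
 * Note (illustrative): DAX mappings are marked VM_MIXEDMAP because they
 * install pfn-based ptes that have no struct page behind them, and
 * VM_HUGEPAGE to make the vma eligible for the PMD fault path above.
 * Page-cache-backed mappings need neither flag.
 */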
321
322 static int ext4_file_open(struct inode * inode, struct file * filp)
323 {
324         struct super_block *sb = inode->i_sb;
325         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
326         struct vfsmount *mnt = filp->f_path.mnt;
327         struct dentry *dir;
328         struct path path;
329         char buf[64], *cp;
330         int ret;
331
332         if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
333                      !(sb->s_flags & MS_RDONLY))) {
334                 sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
335                 /*
336                  * Sample where the filesystem has been mounted and
337                  * store it in the superblock for sysadmin convenience
338                  * when trying to sort through large numbers of block
339                  * devices or filesystem images.
340                  */
341                 memset(buf, 0, sizeof(buf));
342                 path.mnt = mnt;
343                 path.dentry = mnt->mnt_root;
344                 cp = d_path(&path, buf, sizeof(buf));
345                 if (!IS_ERR(cp)) {
346                         handle_t *handle;
347                         int err;
348
349                         handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
350                         if (IS_ERR(handle))
351                                 return PTR_ERR(handle);
352                         BUFFER_TRACE(sbi->s_sbh, "get_write_access");
353                         err = ext4_journal_get_write_access(handle, sbi->s_sbh);
354                         if (err) {
355                                 ext4_journal_stop(handle);
356                                 return err;
357                         }
358                         strlcpy(sbi->s_es->s_last_mounted, cp,
359                                 sizeof(sbi->s_es->s_last_mounted));
360                         ext4_handle_dirty_super(handle, sb);
361                         ext4_journal_stop(handle);
362                 }
363         }
364         if (ext4_encrypted_inode(inode)) {
365                 ret = ext4_get_encryption_info(inode);
366                 if (ret)
367                         return -EACCES;
368                 if (ext4_encryption_info(inode) == NULL)
369                         return -ENOKEY;
370         }
371
372         dir = dget_parent(file_dentry(filp));
373         if (ext4_encrypted_inode(d_inode(dir)) &&
374             !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
375                 ext4_warning(inode->i_sb,
376                              "Inconsistent encryption contexts: %lu/%lu\n",
377                              (unsigned long) d_inode(dir)->i_ino,
378                              (unsigned long) inode->i_ino);
379                 dput(dir);
380                 return -EPERM;
381         }
382         dput(dir);
383         /*
384          * Set up the jbd2_inode if we are opening the inode for
385          * writing and the journal is present
386          */
387         if (filp->f_mode & FMODE_WRITE) {
388                 ret = ext4_inode_attach_jinode(inode);
389                 if (ret < 0)
390                         return ret;
391         }
392         return dquot_file_open(inode, filp);
393 }

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because this way we can handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  Once the extent status tree tracks all extent status for a
 * file, we will be able to use it directly to retrieve the offset for
 * SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look up
 * the page cache to check whether there is data in the range
 * [startoff, endoff]: if the range covers an unwritten extent, we treat
 * that extent as data or as a hole depending on whether the page cache
 * holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     ext4_lblk_t end_blk,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;

        index = startoff >> PAGE_SHIFT;
        end = endoff >> PAGE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first iteration, or the previous
                         * offset is still inside the requested range, there
                         * is a hole at that offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first iteration and the start offset lies
                 * before the first page found, there is a hole at the start
                 * offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is still inside the given
                         * range but this page lies beyond it, the gap in
                         * between is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages came back than we asked for, so the rest of
                 * the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}
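
/*
 * Illustrative example (assuming block size == page size): suppose blocks
 * 0-3 are one unwritten (fallocated) extent and only block 1 has data in
 * the page cache.  A SEEK_DATA scan starting at block 0 reports block 1,
 * the first page whose buffers are uptodate; a SEEK_HOLE scan starting at
 * block 0 reports block 0 itself, since no page covers that offset.
 */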

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret <= 0) {
                        /* No extent found -> no data */
                        if (ret == 0)
                                ret = -ENXIO;
                        inode_unlock(inode);
                        return ret;
                }

                last = es.es_lblk;
                if (last != start)
                        dataoff = (loff_t)last << blkbits;
                if (!ext4_es_is_unwritten(&es))
                        break;

                /*
                 * If there is an unwritten extent at this offset, treat it
                 * as data or as a hole according to whether the page cache
                 * has data for it.
                 */
                if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                              es.es_lblk + es.es_len, &dataoff))
                        break;
                last += es.es_len;
                dataoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}
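
/*
 * Illustrative example: for a sparse file with a 1 MiB hole followed by
 * written data, lseek(fd, 0, SEEK_DATA) walks the extents from block 0,
 * finds the first one starting at the 1 MiB boundary, and positions the
 * file there; an offset at or past EOF fails with ENXIO, per lseek(2).
 */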

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret < 0) {
                        inode_unlock(inode);
                        return ret;
                }
                /* Found a hole? */
                if (ret == 0 || es.es_lblk > last) {
                        if (last != start)
                                holeoff = (loff_t)last << blkbits;
                        break;
                }
                /*
                 * If there is an unwritten extent at this offset, treat it
                 * as data or as a hole according to whether the page cache
                 * has data for it.
                 */
                if (ext4_es_is_unwritten(&es) &&
                    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                              last + es.es_len, &holeoff))
                        break;

                last += es.es_len;
                holeoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}
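
/*
 * Illustrative userspace usage (hypothetical fd; semantics per lseek(2)):
 *
 *      off_t data = lseek(fd, 0, SEEK_DATA);     fails with ENXIO if the
 *                                                file is entirely sparse
 *      off_t hole = lseek(fd, data, SEEK_HOLE);  EOF always counts as a hole
 *
 * SEEK_SET/SEEK_CUR/SEEK_END are delegated to generic_file_llseek_size()
 * with the per-inode maxbytes selected above.
 */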

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};