// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

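/*
 * A rough guide to the locking in the paths below: glocks are the
 * cluster-wide locks that keep inode state coherent between nodes.
 * LM_ST_SHARED suffices for readers, LM_ST_EXCLUSIVE is needed when
 * the inode is modified, and LM_FLAG_ANY means any currently-held
 * compatible state will do. This is only a summary; glock.c has the
 * full state machine.
 */
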
/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/*
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};

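/*
 * For example, "chattr +j" on a regular file sets FS_JOURNAL_DATA_FL,
 * which the table above translates to GFS2_DIF_JDATA; on a directory
 * the same fsflag ends up as GFS2_DIF_INHERIT_JDATA instead. The
 * masking that makes this work lives in gfs2_get_flags() and
 * gfs2_set_flags() below.
 */
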
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int i, error;
	u32 gfsflags, fsflags = 0;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	gfsflags = ip->i_diskflags;
	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

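/*
 * A note on the GFS2_DIF_JDATA toggle above: in outline, any data
 * written under the old journaling mode must be flushed out and
 * waited for before the inode switches mode, so that no pages are in
 * flight when the new mode takes effect. The log flush flags used
 * for this are defined in log.h.
 */
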
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask);
}

static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}

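/*
 * Note that the i_sizehint update above is racy: two writers can race
 * between the read and the set. Since the value is only an allocation
 * hint, losing an update is harmless.
 */
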
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_SIZE;
	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}

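/*
 * The on-stack buffer_head above acts only as a cursor for
 * gfs2_block_map(): b_size is set to the range still wanted, and on
 * return it reports how much of that range one extent covered.
 */
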
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

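/*
 * A rough guide to the reservation above: rblocks counts journal
 * blocks for the dinode itself and any indirect blocks that may need
 * allocating, plus (for jdata inodes) the data blocks, and statfs,
 * quota and resource-group header blocks whenever a real allocation
 * takes place. The RES_* constants are defined in trans.h.
 */
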
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (fp == NULL)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rsqa_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}

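/*
 * The direct I/O paths below take the glock in LM_ST_DEFERRED mode.
 * Roughly, deferred mode is shared like LM_ST_SHARED but incompatible
 * with it, so holding it guarantees that other nodes have written
 * back and dropped their cached pages for this inode, while still
 * allowing concurrent direct I/O from multiple nodes.
 */
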
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	struct gfs2_holder gh;
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	ssize_t ret;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);

out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	return generic_file_read_iter(iocb, to);
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	ssize_t written = 0, ret;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	ret = file_remove_privs(file);
	if (ret)
		goto out2;

	ret = file_update_time(file);
	if (ret)
		goto out2;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		loff_t pos, endbyte;
		ssize_t buffered;

		written = gfs2_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out2;

		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (unlikely(ret < 0))
			goto out2;
		buffered = ret;

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		pos = iocb->ki_pos;
		endbyte = pos + buffered - 1;
		ret = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (!ret) {
			iocb->ki_pos += buffered;
			written += buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (likely(ret > 0))
			iocb->ki_pos += ret;
	}

out2:
	current->backing_dev_info = NULL;
out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return written ? written : ret;
}

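/*
 * In outline, the O_DIRECT path above may complete only partially and
 * then fall back to a buffered write for the remainder (for example
 * when writing past EOF, which gfs2_file_direct_write declines). The
 * buffered tail is then written back and invalidated so that, as far
 * as possible, O_DIRECT semantics hold for the whole range.
 */
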
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

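/*
 * As a rough illustration of the loop above: with 4k blocks, an
 * indirect block holds about sd_inptrs (~509) pointers, so writing N
 * data blocks costs roughly N/509 indirect blocks at each level of
 * the metadata tree until the remainder fits in the dinode's
 * sd_diptrs direct pointers. The "- 3" terms leave slack for the
 * tree growing a level. These numbers are illustrative, not exact.
 */
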
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;
	len = next - offset;

	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

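/*
 * Note on the chunking above: __gfs2_fallocate works through the
 * request in pieces (starting at roughly half of the largest resource
 * group's data capacity) so that each quota check, reservation and
 * journal transaction stays a manageable size.
 */
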
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = gfs2_rsqa_alloc(ip);
		if (ret)
			goto out_putw;

		ret = __gfs2_fallocate(file, mode, offset, len);

		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

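/*
 * flock locks are mapped onto a dedicated flock glock per inode, so
 * the DLM provides the cluster-wide arbitration. In rough terms,
 * LOCK_SH becomes LM_ST_SHARED and LOCK_EX becomes LM_ST_EXCLUSIVE,
 * with GL_EXACT requiring exactly that state rather than any
 * compatible one.
 */
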
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};