Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 3 Aug 2010 21:33:38 +0000 (14:33 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 3 Aug 2010 21:33:38 +0000 (14:33 -0700)
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (49 commits)
  xfs: simplify and speed up direct I/O completions
  xfs: move aio completion after unwritten extent conversion
  direct-io: move aio_complete into ->end_io
  xfs: fix big endian build
  xfs: clean up xfs_bmap_get_bp
  xfs: simplify xfs_truncate_file
  xfs: kill the b_strat callback in xfs_buf
  xfs: remove obsolete osyncisosync mount option
  xfs: clean up filestreams helpers
  xfs: fix gcc 4.6 set but not read and unused statement warnings
  xfs: Fix build when CONFIG_XFS_POSIX_ACL=n
  xfs: fix unsigned underflow in xfs_free_eofblocks
  xfs: use GFP_NOFS for page cache allocation
  xfs: fix memory reclaim recursion deadlock on locked inode buffer
  xfs: fix xfs_trans_add_item() lockdep warnings
  xfs: simplify and remove xfs_ireclaim
  xfs: don't block on buffer read errors
  xfs: move inode shrinker unregister even earlier
  xfs: remove a dmapi leftover
  xfs: writepage always has buffers
  ...

91 files changed:
Documentation/filesystems/xfs.txt
fs/direct-io.c
fs/ext4/inode.c
fs/ocfs2/aops.c
fs/xfs/Makefile
fs/xfs/linux-2.6/xfs_acl.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_aops.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_dmapi_priv.h [deleted file]
fs/xfs/linux-2.6/xfs_export.c
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_fs_subr.c
fs/xfs/linux-2.6/xfs_fs_subr.h [deleted file]
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_ioctl32.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_linux.h
fs/xfs/linux-2.6/xfs_quotaops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_super.h
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/linux-2.6/xfs_trace.c
fs/xfs/linux-2.6/xfs_trace.h
fs/xfs/quota/xfs_dquot.c
fs/xfs/quota/xfs_dquot_item.c
fs/xfs/quota/xfs_qm.c
fs/xfs/quota/xfs_qm_bhv.c
fs/xfs/quota/xfs_qm_stats.c
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/quota/xfs_trans_dquot.c
fs/xfs/support/debug.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_alloc_btree.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_bmap_btree.c
fs/xfs/xfs_btree.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_dir2.c
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_dir2_data.c
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dir2_sf.c
fs/xfs/xfs_dmapi.h [deleted file]
fs/xfs/xfs_dmops.c [deleted file]
fs/xfs/xfs_error.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_filestream.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_ialloc_btree.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_inode_item.h
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_rename.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rw.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_buf.c
fs/xfs/xfs_trans_extfree.c
fs/xfs/xfs_trans_inode.c
fs/xfs/xfs_trans_item.c [deleted file]
fs/xfs/xfs_trans_priv.h
fs/xfs/xfs_utils.c
fs/xfs/xfs_utils.h
fs/xfs/xfs_vnodeops.c
include/linux/fs.h

index 9878f50d6ed6d37b3241eb558a58d08681898927..7bff3e4f35df84a9def7507badb42ccdeffef419 100644 (file)
@@ -131,17 +131,6 @@ When mounting an XFS filesystem, the following options are accepted.
        Don't check for double mounted file systems using the file system uuid.
        This is useful to mount LVM snapshot volumes.
 
-  osyncisosync
-       Make O_SYNC writes implement true O_SYNC.  WITHOUT this option,
-       Linux XFS behaves as if an "osyncisdsync" option is used,
-       which will make writes to files opened with the O_SYNC flag set
-       behave as if the O_DSYNC flag had been used instead.
-       This can result in better performance without compromising
-       data safety.
-       However if this option is not in effect, timestamp updates from
-       O_SYNC writes can be lost if the system crashes.
-       If timestamp updates are critical, use the osyncisosync option.
-
   uquota/usrquota/uqnoenforce/quota
        User disk quota accounting enabled, and limits (optionally)
        enforced.  Refer to xfs_quota(8) for further details.
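
The removed osyncisosync text above contrasts O_SYNC and O_DSYNC semantics. A minimal userspace sketch, not part of this merge, of the two open(2) flags in question (file names are arbitrary, error handling omitted for brevity):

    /*
     * O_DSYNC asks for data integrity completion: the written data and any
     * metadata needed to read it back are durable when write(2) returns.
     * O_SYNC asks for file integrity completion, which also covers other
     * inode metadata such as timestamps.
     */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int dfd = open("data.log", O_WRONLY | O_CREAT | O_DSYNC, 0644);
        int sfd = open("also-meta.log", O_WRONLY | O_CREAT | O_SYNC, 0644);

        write(dfd, "x", 1);   /* data durable; timestamp update may lag */
        write(sfd, "x", 1);   /* data and inode metadata durable        */

        close(dfd);
        close(sfd);
        return 0;
    }
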
index 7600aacf531dc8ed16ccfb727878d13834a16503..a10cb91cadea04ac68ff5ce0db34d6d07bdd6371 100644 (file)
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static int dio_complete(struct dio *dio, loff_t offset, int ret)
+static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
 {
        ssize_t transferred = 0;
 
@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
                        transferred = dio->i_size - offset;
        }
 
-       if (dio->end_io && dio->result)
-               dio->end_io(dio->iocb, offset, transferred,
-                           dio->map_bh.b_private);
-
-       if (dio->flags & DIO_LOCKING)
-               /* lockdep: non-owner release */
-               up_read_non_owner(&dio->inode->i_alloc_sem);
-
        if (ret == 0)
                ret = dio->page_errors;
        if (ret == 0)
@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
        if (ret == 0)
                ret = transferred;
 
+       if (dio->end_io && dio->result) {
+               dio->end_io(dio->iocb, offset, transferred,
+                           dio->map_bh.b_private, ret, is_async);
+       } else if (is_async) {
+               aio_complete(dio->iocb, ret, 0);
+       }
+
+       if (dio->flags & DIO_LOCKING)
+               /* lockdep: non-owner release */
+               up_read_non_owner(&dio->inode->i_alloc_sem);
+
        return ret;
 }
 
@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
        if (remaining == 0) {
-               int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
-               aio_complete(dio->iocb, ret, 0);
+               dio_complete(dio, dio->iocb->ki_pos, 0, true);
                kfree(dio);
        }
 }
@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
        if (ret2 == 0) {
-               ret = dio_complete(dio, offset, ret);
+               ret = dio_complete(dio, offset, ret, false);
                kfree(dio);
        } else
                BUG_ON(ret != -EIOCBQUEUED);
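
With this change the ->end_io callback, when one is set, owns the aio_complete() call for async requests; dio_complete() only calls aio_complete() itself when no handler is present. A minimal sketch of a handler under the new signature, mirroring the converted ext4/ocfs2/XFS handlers that follow (examplefs_end_io_dio is a hypothetical name, not code from this merge):

    static void examplefs_end_io_dio(struct kiocb *iocb, loff_t offset,
                                     ssize_t size, void *private, int ret,
                                     bool is_async)
    {
        /*
         * Per-filesystem completion work (for example unwritten extent
         * conversion) would run or be queued here, using the byte range
         * [offset, offset + size) and the cookie passed via private.
         */

        /* The callback, not dio_complete(), now finishes async requests. */
        if (is_async)
                aio_complete(iocb, ret, 0);
    }
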
index 42272d67955a413cdd17a53980fe5dac410cceff..0afc8c1d8cf3597bf075ecb731a16accb67687ad 100644 (file)
@@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
 }
 
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
-                           ssize_t size, void *private)
+                           ssize_t size, void *private, int ret,
+                           bool is_async)
 {
         ext4_io_end_t *io_end = iocb->private;
        struct workqueue_struct *wq;
@@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 
        /* if not async direct IO or dio with 0 bytes write, just return */
        if (!io_end || !size)
-               return;
+               goto out;
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p"
                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
@@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        if (io_end->flag != EXT4_IO_UNWRITTEN){
                ext4_free_io_end(io_end);
                iocb->private = NULL;
-               return;
+               goto out;
        }
 
        io_end->offset = offset;
@@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        list_add_tail(&io_end->list, &ei->i_completed_io_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        iocb->private = NULL;
+out:
+       if (is_async)
+               aio_complete(iocb, ret, 0);
 }
 
 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
index 356e976772bf1adb112aaf3c8b28ddb71b639afb..96337a4fbbdfc3840cffc3a458ee6f37ed0655ce 100644 (file)
@@ -578,7 +578,9 @@ bail:
 static void ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
-                            void *private)
+                            void *private,
+                            int ret,
+                            bool is_async)
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int level;
@@ -592,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        if (!level)
                up_read(&inode->i_alloc_sem);
        ocfs2_rw_unlock(inode, level);
+
+       if (is_async)
+               aio_complete(iocb, ret, 0);
 }
 
 /*
index c8fb13f83b3f1f95dc642df019a0d14072ed6d15..0dce969d6cad61840430a40f3972de64a48bfdb0 100644 (file)
@@ -87,11 +87,9 @@ xfs-y                                += xfs_alloc.o \
                                   xfs_trans_buf.o \
                                   xfs_trans_extfree.o \
                                   xfs_trans_inode.o \
-                                  xfs_trans_item.o \
                                   xfs_utils.o \
                                   xfs_vnodeops.o \
-                                  xfs_rw.o \
-                                  xfs_dmops.o
+                                  xfs_rw.o
 
 xfs-$(CONFIG_XFS_TRACE)                += xfs_btree_trace.o
 
index 9f769b5b38fc4c4b41e2b7f356889fd1d5f9a1f6..b2771862fd3df79fbb721b3c5c93865680a8e145 100644 (file)
@@ -225,7 +225,7 @@ xfs_check_acl(struct inode *inode, int mask)
        struct posix_acl *acl;
        int error = -EAGAIN;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_check_acl(ip);
 
        /*
         * If there is no attribute fork no ACL exists on this inode and
index 34640d6dbdcb8e6d5879c50a3acafed0cc819708..d24e78f32f3e3f5f41331c80f1f46e86fb700a9a 100644 (file)
 #include "xfs_inum.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_trans.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
 #include "xfs_error.h"
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
@@ -92,18 +85,15 @@ void
 xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
-       int                     *unmapped,
        int                     *unwritten)
 {
        struct buffer_head      *bh, *head;
 
-       *delalloc = *unmapped = *unwritten = 0;
+       *delalloc = *unwritten = 0;
 
        bh = head = page_buffers(page);
        do {
-               if (buffer_uptodate(bh) && !buffer_mapped(bh))
-                       (*unmapped) = 1;
-               else if (buffer_unwritten(bh))
+               if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
@@ -212,23 +202,17 @@ xfs_setfilesize(
 }
 
 /*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
+ * Schedule IO completion handling on the final put of an ioend.
  */
 STATIC void
 xfs_finish_ioend(
-       xfs_ioend_t     *ioend,
-       int             wait)
+       struct xfs_ioend        *ioend)
 {
        if (atomic_dec_and_test(&ioend->io_remaining)) {
-               struct workqueue_struct *wq;
-
-               wq = (ioend->io_type == IO_UNWRITTEN) ?
-                       xfsconvertd_workqueue : xfsdatad_workqueue;
-               queue_work(wq, &ioend->io_work);
-               if (wait)
-                       flush_workqueue(wq);
+               if (ioend->io_type == IO_UNWRITTEN)
+                       queue_work(xfsconvertd_workqueue, &ioend->io_work);
+               else
+                       queue_work(xfsdatad_workqueue, &ioend->io_work);
        }
 }
 
@@ -272,11 +256,25 @@ xfs_end_io(
         */
        if (error == EAGAIN) {
                atomic_inc(&ioend->io_remaining);
-               xfs_finish_ioend(ioend, 0);
+               xfs_finish_ioend(ioend);
                /* ensure we don't spin on blocked ioends */
                delay(1);
-       } else
+       } else {
+               if (ioend->io_iocb)
+                       aio_complete(ioend->io_iocb, ioend->io_result, 0);
                xfs_destroy_ioend(ioend);
+       }
+}
+
+/*
+ * Call IO completion handling in caller context on the final put of an ioend.
+ */
+STATIC void
+xfs_finish_ioend_sync(
+       struct xfs_ioend        *ioend)
+{
+       if (atomic_dec_and_test(&ioend->io_remaining))
+               xfs_end_io(&ioend->io_work);
 }
 
 /*
@@ -309,6 +307,8 @@ xfs_alloc_ioend(
        atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;
+       ioend->io_iocb = NULL;
+       ioend->io_result = 0;
 
        INIT_WORK(&ioend->io_work, xfs_end_io);
        return ioend;
@@ -358,7 +358,7 @@ xfs_end_bio(
        bio->bi_end_io = NULL;
        bio_put(bio);
 
-       xfs_finish_ioend(ioend, 0);
+       xfs_finish_ioend(ioend);
 }
 
 STATIC void
@@ -500,7 +500,7 @@ xfs_submit_ioend(
                }
                if (bio)
                        xfs_submit_ioend_bio(wbc, ioend, bio);
-               xfs_finish_ioend(ioend, 0);
+               xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
 }
 
@@ -614,31 +614,30 @@ xfs_map_at_offset(
 STATIC unsigned int
 xfs_probe_page(
        struct page             *page,
-       unsigned int            pg_offset,
-       int                     mapped)
+       unsigned int            pg_offset)
 {
+       struct buffer_head      *bh, *head;
        int                     ret = 0;
 
        if (PageWriteback(page))
                return 0;
+       if (!PageDirty(page))
+               return 0;
+       if (!page->mapping)
+               return 0;
+       if (!page_has_buffers(page))
+               return 0;
 
-       if (page->mapping && PageDirty(page)) {
-               if (page_has_buffers(page)) {
-                       struct buffer_head      *bh, *head;
-
-                       bh = head = page_buffers(page);
-                       do {
-                               if (!buffer_uptodate(bh))
-                                       break;
-                               if (mapped != buffer_mapped(bh))
-                                       break;
-                               ret += bh->b_size;
-                               if (ret >= pg_offset)
-                                       break;
-                       } while ((bh = bh->b_this_page) != head);
-               } else
-                       ret = mapped ? 0 : PAGE_CACHE_SIZE;
-       }
+       bh = head = page_buffers(page);
+       do {
+               if (!buffer_uptodate(bh))
+                       break;
+               if (!buffer_mapped(bh))
+                       break;
+               ret += bh->b_size;
+               if (ret >= pg_offset)
+                       break;
+       } while ((bh = bh->b_this_page) != head);
 
        return ret;
 }
@@ -648,8 +647,7 @@ xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
-       struct buffer_head      *head,
-       int                     mapped)
+       struct buffer_head      *head)
 {
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
@@ -658,7 +656,7 @@ xfs_probe_cluster(
 
        /* First sum forwards in this page */
        do {
-               if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
+               if (!buffer_uptodate(bh) || !buffer_mapped(bh))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
@@ -692,7 +690,7 @@ xfs_probe_cluster(
                                pg_offset = PAGE_CACHE_SIZE;
 
                        if (page->index == tindex && trylock_page(page)) {
-                               pg_len = xfs_probe_page(page, pg_offset, mapped);
+                               pg_len = xfs_probe_page(page, pg_offset);
                                unlock_page(page);
                        }
 
@@ -761,7 +759,6 @@ xfs_convert_page(
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
-       int                     startio,
        int                     all_bh)
 {
        struct buffer_head      *bh, *head;
@@ -832,19 +829,14 @@ xfs_convert_page(
                        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 
                        xfs_map_at_offset(inode, bh, imap, offset);
-                       if (startio) {
-                               xfs_add_to_ioend(inode, bh, offset,
-                                               type, ioendp, done);
-                       } else {
-                               set_buffer_dirty(bh);
-                               unlock_buffer(bh);
-                               mark_buffer_dirty(bh);
-                       }
+                       xfs_add_to_ioend(inode, bh, offset, type,
+                                        ioendp, done);
+
                        page_dirty--;
                        count++;
                } else {
                        type = IO_NEW;
-                       if (buffer_mapped(bh) && all_bh && startio) {
+                       if (buffer_mapped(bh) && all_bh) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
@@ -859,14 +851,12 @@ xfs_convert_page(
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
-       if (startio) {
-               if (count) {
-                       wbc->nr_to_write--;
-                       if (wbc->nr_to_write <= 0)
-                               done = 1;
-               }
-               xfs_start_page_writeback(page, !page_dirty, count);
+       if (count) {
+               wbc->nr_to_write--;
+               if (wbc->nr_to_write <= 0)
+                       done = 1;
        }
+       xfs_start_page_writeback(page, !page_dirty, count);
 
        return done;
  fail_unlock_page:
@@ -886,7 +876,6 @@ xfs_cluster_write(
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
-       int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
 {
@@ -902,7 +891,7 @@ xfs_cluster_write(
 
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-                                       imap, ioendp, wbc, startio, all_bh);
+                                       imap, ioendp, wbc, all_bh);
                        if (done)
                                break;
                }
@@ -981,7 +970,7 @@ xfs_aops_discard_page(
                 */
                error = xfs_bmapi(NULL, ip, offset_fsb, 1,
                                XFS_BMAPI_ENTIRE,  NULL, 0, &imap,
-                               &nimaps, NULL, NULL);
+                               &nimaps, NULL);
 
                if (error) {
                        /* something screwed, just bail */
@@ -1009,7 +998,7 @@ xfs_aops_discard_page(
                 */
                xfs_bmap_init(&flist, &firstblock);
                error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
-                                       &flist, NULL, &done);
+                                       &flist, &done);
 
                ASSERT(!flist.xbf_count && !flist.xbf_first);
                if (error) {
@@ -1032,50 +1021,66 @@ out_invalidate:
 }
 
 /*
- * Calling this without startio set means we are being asked to make a dirty
- * page ready for freeing it's buffers.  When called with startio set then
- * we are coming from writepage.
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ * For any other dirty buffer heads on the page we should flush them.
  *
- * When called with startio set it is important that we write the WHOLE
- * page if possible.
- * The bh->b_state's cannot know if any of the blocks or which block for
- * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only valid if the page itself isn't completely uptodate.  Some layers
- * may clear the page dirty flag prior to calling write page, under the
- * assumption the entire page will be written out; by not writing out the
- * whole page the page can be reused before all valid dirty data is
- * written out.  Note: in the case of a page that has been dirty'd by
- * mapwrite and but partially setup by block_prepare_write the
- * bh->b_states's will not agree and only ones setup by BPW/BCW will have
- * valid state, thus the whole page must be written out thing.
+ * If we detect that a transaction would be required to flush the page, we
+ * have to check the process flags first, if we are already in a transaction
+ * or disk I/O during allocations is off, we need to fail the writepage and
+ * redirty the page.
  */
-
 STATIC int
-xfs_page_state_convert(
-       struct inode    *inode,
-       struct page     *page,
-       struct writeback_control *wbc,
-       int             startio,
-       int             unmapped) /* also implies page uptodate */
+xfs_vm_writepage(
+       struct page             *page,
+       struct writeback_control *wbc)
 {
+       struct inode            *inode = page->mapping->host;
+       int                     delalloc, unwritten;
        struct buffer_head      *bh, *head;
        struct xfs_bmbt_irec    imap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
-       unsigned long           p_offset = 0;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index;
        ssize_t                 size, len;
        int                     flags, err, imap_valid = 0, uptodate = 1;
-       int                     page_dirty, count = 0;
-       int                     trylock = 0;
-       int                     all_bh = unmapped;
+       int                     count = 0;
+       int                     all_bh = 0;
 
-       if (startio) {
-               if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
-                       trylock |= BMAPI_TRYLOCK;
-       }
+       trace_xfs_writepage(inode, page, 0);
+
+       ASSERT(page_has_buffers(page));
+
+       /*
+        * Refuse to write the page out if we are called from reclaim context.
+        *
+        * This avoids stack overflows when called from deeply used stacks in
+        * random callers for direct reclaim or memcg reclaim.  We explicitly
+        * allow reclaim from kswapd as the stack usage there is relatively low.
+        *
+        * This should really be done by the core VM, but until that happens
+        * filesystems like XFS, btrfs and ext4 have to take care of this
+        * by themselves.
+        */
+       if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
+               goto out_fail;
+
+       /*
+        * We need a transaction if there are delalloc or unwritten buffers
+        * on the page.
+        *
+        * If we need a transaction and the process flags say we are already
+        * in a transaction, or no IO is allowed then mark the page dirty
+        * again and leave the page as is.
+        */
+       xfs_count_page_state(page, &delalloc, &unwritten);
+       if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
+               goto out_fail;
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -1084,50 +1089,33 @@ xfs_page_state_convert(
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-                       if (startio)
-                               unlock_page(page);
+                       unlock_page(page);
                        return 0;
                }
        }
 
-       /*
-        * page_dirty is initially a count of buffers on the page before
-        * EOF and is decremented as we move each into a cleanable state.
-        *
-        * Derivation:
-        *
-        * End offset is the highest offset that this page should represent.
-        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
-        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
-        * hence give us the correct page_dirty count. On any other page,
-        * it will be zero and in that case we need page_dirty to be the
-        * count of buffers on the page.
-        */
        end_offset = min_t(unsigned long long,
-                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+                       offset);
        len = 1 << inode->i_blkbits;
-       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
-                                       PAGE_CACHE_SIZE);
-       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
-       page_dirty = p_offset / len;
 
        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = BMAPI_READ;
        type = IO_NEW;
 
-       /* TODO: cleanup count and page_dirty */
-
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
-               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
-                       /*
-                        * the iomap is actually still valid, but the ioend
-                        * isn't.  shouldn't happen too often.
-                        */
+
+               /*
+                * A hole may still be marked uptodate because discard_buffer
+                * leaves the flag set.
+                */
+               if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
+                       ASSERT(!buffer_dirty(bh));
                        imap_valid = 0;
                        continue;
                }
@@ -1135,19 +1123,7 @@ xfs_page_state_convert(
                if (imap_valid)
                        imap_valid = xfs_imap_valid(inode, &imap, offset);
 
-               /*
-                * First case, map an unwritten extent and prepare for
-                * extent state conversion transaction on completion.
-                *
-                * Second case, allocate space for a delalloc buffer.
-                * We can return EAGAIN here in the release page case.
-                *
-                * Third case, an unmapped buffer was found, and we are
-                * in a path where we need to write the whole page out.
-                */
-               if (buffer_unwritten(bh) || buffer_delay(bh) ||
-                   ((buffer_uptodate(bh) || PageUptodate(page)) &&
-                    !buffer_mapped(bh) && (unmapped || startio))) {
+               if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        int new_ioend = 0;
 
                        /*
@@ -1161,15 +1137,16 @@ xfs_page_state_convert(
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IO_DELAY;
-                               flags = BMAPI_ALLOCATE | trylock;
-                       } else {
-                               type = IO_NEW;
-                               flags = BMAPI_WRITE | BMAPI_MMAP;
+                               flags = BMAPI_ALLOCATE;
+
+                               if (wbc->sync_mode == WB_SYNC_NONE &&
+                                   wbc->nonblocking)
+                                       flags |= BMAPI_TRYLOCK;
                        }
 
                        if (!imap_valid) {
                                /*
-                                * if we didn't have a valid mapping then we
+                                * If we didn't have a valid mapping then we
                                 * need to ensure that we put the new mapping
                                 * in a new ioend structure. This needs to be
                                 * done to ensure that the ioends correctly
@@ -1177,14 +1154,7 @@ xfs_page_state_convert(
                                 * for unwritten extent conversion.
                                 */
                                new_ioend = 1;
-                               if (type == IO_NEW) {
-                                       size = xfs_probe_cluster(inode,
-                                                       page, bh, head, 0);
-                               } else {
-                                       size = len;
-                               }
-
-                               err = xfs_map_blocks(inode, offset, size,
+                               err = xfs_map_blocks(inode, offset, len,
                                                &imap, flags);
                                if (err)
                                        goto error;
@@ -1193,19 +1163,11 @@ xfs_page_state_convert(
                        }
                        if (imap_valid) {
                                xfs_map_at_offset(inode, bh, &imap, offset);
-                               if (startio) {
-                                       xfs_add_to_ioend(inode, bh, offset,
-                                                       type, &ioend,
-                                                       new_ioend);
-                               } else {
-                                       set_buffer_dirty(bh);
-                                       unlock_buffer(bh);
-                                       mark_buffer_dirty(bh);
-                               }
-                               page_dirty--;
+                               xfs_add_to_ioend(inode, bh, offset, type,
+                                                &ioend, new_ioend);
                                count++;
                        }
-               } else if (buffer_uptodate(bh) && startio) {
+               } else if (buffer_uptodate(bh)) {
                        /*
                         * we got here because the buffer is already mapped.
                         * That means it must already have extents allocated
@@ -1213,8 +1175,7 @@ xfs_page_state_convert(
                         */
                        if (!imap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
-                               size = xfs_probe_cluster(inode, page, bh,
-                                                               head, 1);
+                               size = xfs_probe_cluster(inode, page, bh, head);
                                err = xfs_map_blocks(inode, offset, size,
                                                &imap, flags);
                                if (err)
@@ -1233,18 +1194,16 @@ xfs_page_state_convert(
                         */
                        type = IO_NEW;
                        if (trylock_buffer(bh)) {
-                               ASSERT(buffer_mapped(bh));
                                if (imap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !imap_valid);
-                               page_dirty--;
                                count++;
                        } else {
                                imap_valid = 0;
                        }
-               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
-                          (unmapped || startio)) {
+               } else if (PageUptodate(page)) {
+                       ASSERT(buffer_mapped(bh));
                        imap_valid = 0;
                }
 
@@ -1256,8 +1215,7 @@ xfs_page_state_convert(
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
-       if (startio)
-               xfs_start_page_writeback(page, 1, count);
+       xfs_start_page_writeback(page, 1, count);
 
        if (ioend && imap_valid) {
                xfs_off_t               end_index;
@@ -1275,131 +1233,27 @@ xfs_page_state_convert(
                        end_index = last_index;
 
                xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
-                                       wbc, startio, all_bh, end_index);
+                                       wbc, all_bh, end_index);
        }
 
        if (iohead)
                xfs_submit_ioend(wbc, iohead);
 
-       return page_dirty;
+       return 0;
 
 error:
        if (iohead)
                xfs_cancel_ioend(iohead);
 
-       /*
-        * If it's delalloc and we have nowhere to put it,
-        * throw it away, unless the lower layers told
-        * us to try again.
-        */
-       if (err != -EAGAIN) {
-               if (!unmapped)
-                       xfs_aops_discard_page(page);
-               ClearPageUptodate(page);
-       }
+       xfs_aops_discard_page(page);
+       ClearPageUptodate(page);
+       unlock_page(page);
        return err;
-}
-
-/*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- *    state is cleared before we get here. In this case is it
- *    conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
-
-STATIC int
-xfs_vm_writepage(
-       struct page             *page,
-       struct writeback_control *wbc)
-{
-       int                     error;
-       int                     need_trans;
-       int                     delalloc, unmapped, unwritten;
-       struct inode            *inode = page->mapping->host;
-
-       trace_xfs_writepage(inode, page, 0);
-
-       /*
-        * Refuse to write the page out if we are called from reclaim context.
-        *
-        * This is primarily to avoid stack overflows when called from deep
-        * used stacks in random callers for direct reclaim, but disabling
-        * reclaim for kswap is a nice side-effect as kswapd causes rather
-        * suboptimal I/O patters, too.
-        *
-        * This should really be done by the core VM, but until that happens
-        * filesystems like XFS, btrfs and ext4 have to take care of this
-        * by themselves.
-        */
-       if (current->flags & PF_MEMALLOC)
-               goto out_fail;
-
-       /*
-        * We need a transaction if:
-        *  1. There are delalloc buffers on the page
-        *  2. The page is uptodate and we have unmapped buffers
-        *  3. The page is uptodate and we have no buffers
-        *  4. There are unwritten buffers on the page
-        */
-
-       if (!page_has_buffers(page)) {
-               unmapped = 1;
-               need_trans = 1;
-       } else {
-               xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-               if (!PageUptodate(page))
-                       unmapped = 0;
-               need_trans = delalloc + unmapped + unwritten;
-       }
-
-       /*
-        * If we need a transaction and the process flags say
-        * we are already in a transaction, or no IO is allowed
-        * then mark the page dirty again and leave the page
-        * as is.
-        */
-       if (current_test_flags(PF_FSTRANS) && need_trans)
-               goto out_fail;
-
-       /*
-        * Delay hooking up buffer heads until we have
-        * made our go/no-go decision.
-        */
-       if (!page_has_buffers(page))
-               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-
-       /*
-        * Convert delayed allocate, unwritten or unmapped space
-        * to real space and flush out to disk.
-        */
-       error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
-       if (error == -EAGAIN)
-               goto out_fail;
-       if (unlikely(error < 0))
-               goto out_unlock;
-
-       return 0;
 
 out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
-out_unlock:
-       unlock_page(page);
-       return error;
 }
 
 STATIC int
@@ -1413,65 +1267,27 @@ xfs_vm_writepages(
 
 /*
  * Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
+ * to be released. The page should already be clean. We always
  * have buffer heads in this call.
  *
- * Returns 0 if the page is ok to release, 1 otherwise.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- *    to via regular I/O. buffer heads will be dirty and possibly
- *    delalloc. If no delalloc buffer heads in this case then we
- *    can just return zero.
- *
- * 2. We are called to release a page which has been written via
- *    mmap, all we need to do is ensure there is no delalloc
- *    state in the buffer heads, if not we can let the caller
- *    free them and we should come back later via writepage.
+ * Returns 1 if the page is ok to release, 0 otherwise.
  */
 STATIC int
 xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
 {
-       struct inode            *inode = page->mapping->host;
-       int                     dirty, delalloc, unmapped, unwritten;
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 1,
-       };
+       int                     delalloc, unwritten;
 
-       trace_xfs_releasepage(inode, page, 0);
-
-       if (!page_has_buffers(page))
-               return 0;
+       trace_xfs_releasepage(page->mapping->host, page, 0);
 
-       xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-       if (!delalloc && !unwritten)
-               goto free_buffers;
+       xfs_count_page_state(page, &delalloc, &unwritten);
 
-       if (!(gfp_mask & __GFP_FS))
+       if (WARN_ON(delalloc))
                return 0;
-
-       /* If we are already inside a transaction or the thread cannot
-        * do I/O, we cannot release this page.
-        */
-       if (current_test_flags(PF_FSTRANS))
+       if (WARN_ON(unwritten))
                return 0;
 
-       /*
-        * Convert delalloc space to real space, do not flush the
-        * data out to disk, that will be done by the caller.
-        * Never need to allocate space here - we will always
-        * come back to writepage in that case.
-        */
-       dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
-       if (dirty == 0 && !unwritten)
-               goto free_buffers;
-       return 0;
-
-free_buffers:
        return try_to_free_buffers(page);
 }
 
@@ -1481,9 +1297,9 @@ __xfs_get_blocks(
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
-       int                     direct,
-       bmapi_flags_t           flags)
+       int                     direct)
 {
+       int                     flags = create ? BMAPI_WRITE : BMAPI_READ;
        struct xfs_bmbt_irec    imap;
        xfs_off_t               offset;
        ssize_t                 size;
@@ -1498,8 +1314,11 @@ __xfs_get_blocks(
        if (!create && direct && offset >= i_size_read(inode))
                return 0;
 
-       error = xfs_iomap(XFS_I(inode), offset, size,
-                            create ? flags : BMAPI_READ, &imap, &nimap, &new);
+       if (direct && create)
+               flags |= BMAPI_DIRECT;
+
+       error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
+                         &new);
        if (error)
                return -error;
        if (nimap == 0)
@@ -1579,8 +1398,7 @@ xfs_get_blocks(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __xfs_get_blocks(inode, iblock,
-                               bh_result, create, 0, BMAPI_WRITE);
+       return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
 }
 
 STATIC int
@@ -1590,61 +1408,59 @@ xfs_get_blocks_direct(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __xfs_get_blocks(inode, iblock,
-                               bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+       return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
 }
 
+/*
+ * Complete a direct I/O write request.
+ *
+ * If the private argument is non-NULL __xfs_get_blocks signals us that we
+ * need to issue a transaction to convert the range from unwritten to written
+ * extents.  In case this is regular synchronous I/O we just call xfs_end_io
+ * to do this and we are done.  But in case this was a successfull AIO
+ * request this handler is called from interrupt context, from which we
+ * can't start transactions.  In that case offload the I/O completion to
+ * the workqueues we also use for buffered I/O completion.
+ */
 STATIC void
-xfs_end_io_direct(
-       struct kiocb    *iocb,
-       loff_t          offset,
-       ssize_t         size,
-       void            *private)
+xfs_end_io_direct_write(
+       struct kiocb            *iocb,
+       loff_t                  offset,
+       ssize_t                 size,
+       void                    *private,
+       int                     ret,
+       bool                    is_async)
 {
-       xfs_ioend_t     *ioend = iocb->private;
+       struct xfs_ioend        *ioend = iocb->private;
 
        /*
-        * Non-NULL private data means we need to issue a transaction to
-        * convert a range from unwritten to written extents.  This needs
-        * to happen from process context but aio+dio I/O completion
-        * happens from irq context so we need to defer it to a workqueue.
-        * This is not necessary for synchronous direct I/O, but we do
-        * it anyway to keep the code uniform and simpler.
-        *
-        * Well, if only it were that simple. Because synchronous direct I/O
-        * requires extent conversion to occur *before* we return to userspace,
-        * we have to wait for extent conversion to complete. Look at the
-        * iocb that has been passed to us to determine if this is AIO or
-        * not. If it is synchronous, tell xfs_finish_ioend() to kick the
-        * workqueue and wait for it to complete.
-        *
-        * The core direct I/O code might be changed to always call the
-        * completion handler in the future, in which case all this can
-        * go away.
+        * blockdev_direct_IO can return an error even after the I/O
+        * completion handler was called.  Thus we need to protect
+        * against double-freeing.
         */
+       iocb->private = NULL;
+
        ioend->io_offset = offset;
        ioend->io_size = size;
-       if (ioend->io_type == IO_READ) {
-               xfs_finish_ioend(ioend, 0);
-       } else if (private && size > 0) {
-               xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
-       } else {
+       if (private && size > 0)
+               ioend->io_type = IO_UNWRITTEN;
+
+       if (is_async) {
                /*
-                * A direct I/O write ioend starts it's life in unwritten
-                * state in case they map an unwritten extent.  This write
-                * didn't map an unwritten extent so switch it's completion
-                * handler.
+                * If we are converting an unwritten extent we need to delay
+                * the AIO completion until after the unwrittent extent
+                * conversion has completed, otherwise do it ASAP.
                 */
-               ioend->io_type = IO_NEW;
-               xfs_finish_ioend(ioend, 0);
+               if (ioend->io_type == IO_UNWRITTEN) {
+                       ioend->io_iocb = iocb;
+                       ioend->io_result = ret;
+               } else {
+                       aio_complete(iocb, ret, 0);
+               }
+               xfs_finish_ioend(ioend);
+       } else {
+               xfs_finish_ioend_sync(ioend);
        }
-
-       /*
-        * blockdev_direct_IO can return an error even after the I/O
-        * completion handler was called.  Thus we need to protect
-        * against double-freeing.
-        */
-       iocb->private = NULL;
 }
 
 STATIC ssize_t
@@ -1655,23 +1471,26 @@ xfs_vm_direct_IO(
        loff_t                  offset,
        unsigned long           nr_segs)
 {
-       struct file     *file = iocb->ki_filp;
-       struct inode    *inode = file->f_mapping->host;
-       struct block_device *bdev;
-       ssize_t         ret;
-
-       bdev = xfs_find_bdev_for_inode(inode);
-
-       iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
-                                       IO_UNWRITTEN : IO_READ);
-
-       ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
-                                           xfs_get_blocks_direct,
-                                           xfs_end_io_direct);
+       struct inode            *inode = iocb->ki_filp->f_mapping->host;
+       struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
+       ssize_t                 ret;
+
+       if (rw & WRITE) {
+               iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+
+               ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+                                                   offset, nr_segs,
+                                                   xfs_get_blocks_direct,
+                                                   xfs_end_io_direct_write);
+               if (ret != -EIOCBQUEUED && iocb->private)
+                       xfs_destroy_ioend(iocb->private);
+       } else {
+               ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+                                                   offset, nr_segs,
+                                                   xfs_get_blocks_direct,
+                                                   NULL);
+       }
 
-       if (unlikely(ret != -EIOCBQUEUED && iocb->private))
-               xfs_destroy_ioend(iocb->private);
        return ret;
 }
 
@@ -1686,8 +1505,8 @@ xfs_vm_write_begin(
        void                    **fsdata)
 {
        *pagep = NULL;
-       return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
-                                                               xfs_get_blocks);
+       return block_write_begin(file, mapping, pos, len, flags | AOP_FLAG_NOFS,
+                                pagep, fsdata, xfs_get_blocks);
 }
 
 STATIC sector_t
@@ -1698,7 +1517,7 @@ xfs_vm_bmap(
        struct inode            *inode = (struct inode *)mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
 
-       xfs_itrace_entry(XFS_I(inode));
+       trace_xfs_vm_bmap(XFS_I(inode));
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
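
The reclaim-context test added to xfs_vm_writepage() above, (current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC, is easy to misread. A standalone sketch of the same predicate, using illustrative flag values rather than the kernel's own definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative bit values; the kernel defines its own in sched.h. */
    #define PF_MEMALLOC (1u << 0)   /* task is performing memory reclaim */
    #define PF_KSWAPD   (1u << 1)   /* task is kswapd                    */

    /* True only for direct reclaim: PF_MEMALLOC set while PF_KSWAPD is clear. */
    static bool refuse_writepage(unsigned int flags)
    {
        return (flags & (PF_MEMALLOC | PF_KSWAPD)) == PF_MEMALLOC;
    }

    int main(void)
    {
        printf("%d\n", refuse_writepage(PF_MEMALLOC));              /* 1: direct reclaim, redirty */
        printf("%d\n", refuse_writepage(PF_MEMALLOC | PF_KSWAPD));  /* 0: kswapd may write        */
        printf("%d\n", refuse_writepage(0));                        /* 0: ordinary writeback      */
        return 0;
    }
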
index 4cfc6ea87df8c4c34564da66ee31aba497dad902..c5057fb6237a2c71bd94b5ad4abe046bf7a37e9e 100644 (file)
@@ -37,6 +37,8 @@ typedef struct xfs_ioend {
        size_t                  io_size;        /* size of the extent */
        xfs_off_t               io_offset;      /* offset in the file */
        struct work_struct      io_work;        /* xfsdatad work queue */
+       struct kiocb            *io_iocb;
+       int                     io_result;
 } xfs_ioend_t;
 
 extern const struct address_space_operations xfs_address_space_operations;
@@ -45,6 +47,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
 extern void xfs_ioend_init(void);
 extern void xfs_ioend_wait(struct xfs_inode *);
 
-extern void xfs_count_page_state(struct page *, int *, int *, int *);
+extern void xfs_count_page_state(struct page *, int *, int *);
 
 #endif /* __XFS_AOPS_H__ */
index 2ee3f7a60163e899e971700aba008a243049726a..ea79072f521012549c4c2673a6210340834202a4 100644 (file)
@@ -39,7 +39,6 @@
 #include "xfs_inum.h"
 #include "xfs_log.h"
 #include "xfs_ag.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
 
@@ -579,9 +578,9 @@ _xfs_buf_read(
                        XBF_READ_AHEAD | _XBF_RUN_QUEUES);
 
        status = xfs_buf_iorequest(bp);
-       if (!status && !(flags & XBF_ASYNC))
-               status = xfs_buf_iowait(bp);
-       return status;
+       if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
+               return status;
+       return xfs_buf_iowait(bp);
 }
 
 xfs_buf_t *
@@ -897,36 +896,6 @@ xfs_buf_unlock(
        trace_xfs_buf_unlock(bp, _RET_IP_);
 }
 
-
-/*
- *     Pinning Buffer Storage in Memory
- *     Ensure that no attempt to force a buffer to disk will succeed.
- */
-void
-xfs_buf_pin(
-       xfs_buf_t               *bp)
-{
-       trace_xfs_buf_pin(bp, _RET_IP_);
-       atomic_inc(&bp->b_pin_count);
-}
-
-void
-xfs_buf_unpin(
-       xfs_buf_t               *bp)
-{
-       trace_xfs_buf_unpin(bp, _RET_IP_);
-
-       if (atomic_dec_and_test(&bp->b_pin_count))
-               wake_up_all(&bp->b_waiters);
-}
-
-int
-xfs_buf_ispin(
-       xfs_buf_t               *bp)
-{
-       return atomic_read(&bp->b_pin_count);
-}
-
 STATIC void
 xfs_buf_wait_unpin(
        xfs_buf_t               *bp)
@@ -1018,13 +987,12 @@ xfs_bwrite(
 {
        int                     error;
 
-       bp->b_strat = xfs_bdstrat_cb;
        bp->b_mount = mp;
        bp->b_flags |= XBF_WRITE;
        bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
 
        xfs_buf_delwri_dequeue(bp);
-       xfs_buf_iostrategy(bp);
+       xfs_bdstrat_cb(bp);
 
        error = xfs_buf_iowait(bp);
        if (error)
@@ -1040,7 +1008,6 @@ xfs_bdwrite(
 {
        trace_xfs_buf_bdwrite(bp, _RET_IP_);
 
-       bp->b_strat = xfs_bdstrat_cb;
        bp->b_mount = mp;
 
        bp->b_flags &= ~XBF_READ;
@@ -1075,7 +1042,6 @@ xfs_bioerror(
        XFS_BUF_UNDONE(bp);
        XFS_BUF_STALE(bp);
 
-       XFS_BUF_CLR_BDSTRAT_FUNC(bp);
        xfs_biodone(bp);
 
        return EIO;
@@ -1105,7 +1071,6 @@ xfs_bioerror_relse(
        XFS_BUF_DONE(bp);
        XFS_BUF_STALE(bp);
        XFS_BUF_CLR_IODONE_FUNC(bp);
-       XFS_BUF_CLR_BDSTRAT_FUNC(bp);
        if (!(fl & XBF_ASYNC)) {
                /*
                 * Mark b_error and B_ERROR _both_.
@@ -1311,8 +1276,19 @@ submit_io:
                if (size)
                        goto next_chunk;
        } else {
-               bio_put(bio);
+               /*
+                * if we get here, no pages were added to the bio. However,
+                * we can't just error out here - if the pages are locked then
+                * we have to unlock them otherwise we can hang on a later
+                * access to the page.
+                */
                xfs_buf_ioerror(bp, EIO);
+               if (bp->b_flags & _XBF_PAGE_LOCKED) {
+                       int i;
+                       for (i = 0; i < bp->b_page_count; i++)
+                               unlock_page(bp->b_pages[i]);
+               }
+               bio_put(bio);
        }
 }
 
@@ -1804,7 +1780,7 @@ xfs_buf_delwri_split(
                trace_xfs_buf_delwri_split(bp, _RET_IP_);
                ASSERT(bp->b_flags & XBF_DELWRI);
 
-               if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+               if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
                        if (!force &&
                            time_before(jiffies, bp->b_queuetime + age)) {
                                xfs_buf_unlock(bp);
@@ -1889,7 +1865,7 @@ xfsbufd(
                        struct xfs_buf *bp;
                        bp = list_first_entry(&tmp, struct xfs_buf, b_list);
                        list_del_init(&bp->b_list);
-                       xfs_buf_iostrategy(bp);
+                       xfs_bdstrat_cb(bp);
                        count++;
                }
                if (count)
@@ -1936,7 +1912,7 @@ xfs_flush_buftarg(
                        bp->b_flags &= ~XBF_ASYNC;
                        list_add(&bp->b_list, &wait_list);
                }
-               xfs_buf_iostrategy(bp);
+               xfs_bdstrat_cb(bp);
        }
 
        if (wait) {
index 5fbecefa5dfd69a9807cad3a4f4d786e579d0062..d072e5ff923b3e71eac4000bd432cd84d72e0d9b 100644 (file)
@@ -44,57 +44,57 @@ typedef enum {
        XBRW_ZERO = 3,                  /* Zero target memory */
 } xfs_buf_rw_t;
 
-typedef enum {
-       XBF_READ = (1 << 0),    /* buffer intended for reading from device */
-       XBF_WRITE = (1 << 1),   /* buffer intended for writing to device   */
-       XBF_MAPPED = (1 << 2),  /* buffer mapped (b_addr valid)            */
-       XBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
-       XBF_DONE = (1 << 5),    /* all pages in the buffer uptodate        */
-       XBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
-       XBF_STALE = (1 << 7),   /* buffer has been staled, do not find it  */
-       XBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
-       XBF_ORDERED = (1 << 11),    /* use ordered writes                  */
-       XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
-       XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log   */
-
-       /* flags used only as arguments to access routines */
-       XBF_LOCK = (1 << 14),       /* lock requested                      */
-       XBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
-       XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
-
-       /* flags used only internally */
-       _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache                 */
-       _XBF_PAGES = (1 << 18),     /* backed by refcounted pages          */
-       _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue         */
-       _XBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue             */
-
-       /*
-        * Special flag for supporting metadata blocks smaller than a FSB.
-        *
-        * In this case we can have multiple xfs_buf_t on a single page and
-        * need to lock out concurrent xfs_buf_t readers as they only
-        * serialise access to the buffer.
-        *
-        * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
-        * between reads of the page. Hence we can have one thread read the
-        * page and modify it, but then race with another thread that thinks
-        * the page is not up-to-date and hence reads it again.
-        *
-        * The result is that the first modifcation to the page is lost.
-        * This sort of AGF/AGI reading race can happen when unlinking inodes
-        * that require truncation and results in the AGI unlinked list
-        * modifications being lost.
-        */
-       _XBF_PAGE_LOCKED = (1 << 22),
-
-       /*
-        * If we try a barrier write, but it fails we have to communicate
-        * this to the upper layers.  Unfortunately b_error gets overwritten
-        * when the buffer is re-issued so we have to add another flag to
-        * keep this information.
-        */
-       _XFS_BARRIER_FAILED = (1 << 23),
-} xfs_buf_flags_t;
+#define XBF_READ       (1 << 0) /* buffer intended for reading from device */
+#define XBF_WRITE      (1 << 1) /* buffer intended for writing to device */
+#define XBF_MAPPED     (1 << 2) /* buffer mapped (b_addr valid) */
+#define XBF_ASYNC      (1 << 4) /* initiator will not wait for completion */
+#define XBF_DONE       (1 << 5) /* all pages in the buffer uptodate */
+#define XBF_DELWRI     (1 << 6) /* buffer has dirty pages */
+#define XBF_STALE      (1 << 7) /* buffer has been staled, do not find it */
+#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
+#define XBF_ORDERED    (1 << 11)/* use ordered writes */
+#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
+#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
+
+/* flags used only as arguments to access routines */
+#define XBF_LOCK       (1 << 14)/* lock requested */
+#define XBF_TRYLOCK    (1 << 15)/* lock requested, but do not wait */
+#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
+
+/* flags used only internally */
+#define _XBF_PAGE_CACHE        (1 << 17)/* backed by pagecache */
+#define _XBF_PAGES     (1 << 18)/* backed by refcounted pages */
+#define _XBF_RUN_QUEUES        (1 << 19)/* run block device task queue */
+#define _XBF_DELWRI_Q  (1 << 21)/* buffer on delwri queue */
+
+/*
+ * Special flag for supporting metadata blocks smaller than a FSB.
+ *
+ * In this case we can have multiple xfs_buf_t on a single page and
+ * need to lock out concurrent xfs_buf_t readers as they only
+ * serialise access to the buffer.
+ *
+ * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
+ * between reads of the page. Hence we can have one thread read the
+ * page and modify it, but then race with another thread that thinks
+ * the page is not up-to-date and hence reads it again.
+ *
+ * The result is that the first modification to the page is lost.
+ * This sort of AGF/AGI reading race can happen when unlinking inodes
+ * that require truncation and results in the AGI unlinked list
+ * modifications being lost.
+ */
+#define _XBF_PAGE_LOCKED       (1 << 22)
+
+/*
+ * If we try a barrier write, but it fails we have to communicate
+ * this to the upper layers.  Unfortunately b_error gets overwritten
+ * when the buffer is re-issued so we have to add another flag to
+ * keep this information.
+ */
+#define _XFS_BARRIER_FAILED    (1 << 23)
+
+typedef unsigned int xfs_buf_flags_t;
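
The hunk above turns the buffer flags from an enum into plain #defines over an unsigned int typedef, presumably so that OR-ed flag combinations are ordinary integer values rather than out-of-range enum constants. A minimal standalone sketch of the same pattern, with made-up flag names rather than the XFS set:

    /*
     * Sketch of flag bits carried in a plain integer type rather than an
     * enum, so OR-ed combinations are ordinary values of the type.
     */
    #include <stdio.h>

    #define BUF_READ        (1u << 0)
    #define BUF_WRITE       (1u << 1)
    #define BUF_ASYNC       (1u << 2)

    typedef unsigned int buf_flags_t;

    static void describe(buf_flags_t flags)
    {
            printf("read=%d write=%d async=%d\n",
                   !!(flags & BUF_READ),
                   !!(flags & BUF_WRITE),
                   !!(flags & BUF_ASYNC));
    }

    int main(void)
    {
            buf_flags_t flags = BUF_WRITE | BUF_ASYNC;  /* just an unsigned int */

            describe(flags);
            flags &= ~BUF_ASYNC;                        /* clear one bit */
            describe(flags);
            return 0;
    }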
 
 #define XFS_BUF_FLAGS \
        { XBF_READ,             "READ" }, \
@@ -187,7 +187,6 @@ typedef struct xfs_buf {
        atomic_t                b_io_remaining; /* #outstanding I/O requests */
        xfs_buf_iodone_t        b_iodone;       /* I/O completion function */
        xfs_buf_relse_t         b_relse;        /* releasing function */
-       xfs_buf_bdstrat_t       b_strat;        /* pre-write function */
        struct completion       b_iowait;       /* queue for I/O waiters */
        void                    *b_fspriv;
        void                    *b_fspriv2;
@@ -245,11 +244,6 @@ extern int xfs_buf_iowait(xfs_buf_t *);
 extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
                                xfs_buf_rw_t);
 
-static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
-{
-       return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
-}
-
 static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
        return bp ? bp->b_error : ENOMEM;
@@ -258,11 +252,6 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
 /* Buffer Utility Routines */
 extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
 
-/* Pinning Buffer Storage in Memory */
-extern void xfs_buf_pin(xfs_buf_t *);
-extern void xfs_buf_unpin(xfs_buf_t *);
-extern int xfs_buf_ispin(xfs_buf_t *);
-
 /* Delayed Write Buffer Routines */
 extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
 extern void xfs_buf_delwri_promote(xfs_buf_t *);
@@ -326,8 +315,6 @@ extern void xfs_buf_terminate(void);
 #define XFS_BUF_IODONE_FUNC(bp)                        ((bp)->b_iodone)
 #define XFS_BUF_SET_IODONE_FUNC(bp, func)      ((bp)->b_iodone = (func))
 #define XFS_BUF_CLR_IODONE_FUNC(bp)            ((bp)->b_iodone = NULL)
-#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func)     ((bp)->b_strat = (func))
-#define XFS_BUF_CLR_BDSTRAT_FUNC(bp)           ((bp)->b_strat = NULL)
 
 #define XFS_BUF_FSPRIVATE(bp, type)            ((type)(bp)->b_fspriv)
 #define XFS_BUF_SET_FSPRIVATE(bp, val)         ((bp)->b_fspriv = (void*)(val))
@@ -351,7 +338,7 @@ extern void xfs_buf_terminate(void);
 #define XFS_BUF_SET_VTYPE(bp, type)            do { } while (0)
 #define XFS_BUF_SET_REF(bp, ref)               do { } while (0)
 
-#define XFS_BUF_ISPINNED(bp)   xfs_buf_ispin(bp)
+#define XFS_BUF_ISPINNED(bp)   atomic_read(&((bp)->b_pin_count))
 
 #define XFS_BUF_VALUSEMA(bp)   xfs_buf_lock_value(bp)
 #define XFS_BUF_CPSEMA(bp)     (xfs_buf_cond_lock(bp) == 0)
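
With xfs_buf_pin/unpin/ispin removed above, XFS_BUF_ISPINNED() now just reads the atomic pin counter directly. A small C11 sketch of that idea, with illustrative names rather than the kernel atomics API:

    /*
     * C11 sketch of a pin count kept in an atomic counter and queried by
     * reading it directly, instead of going through helper functions.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct buf {
            atomic_int pin_count;
    };

    #define BUF_ISPINNED(bp)        (atomic_load(&(bp)->pin_count) != 0)

    int main(void)
    {
            struct buf b;

            atomic_init(&b.pin_count, 0);
            printf("pinned: %d\n", BUF_ISPINNED(&b));

            atomic_fetch_add(&b.pin_count, 1);      /* pin */
            printf("pinned: %d\n", BUF_ISPINNED(&b));

            atomic_fetch_sub(&b.pin_count, 1);      /* unpin */
            printf("pinned: %d\n", BUF_ISPINNED(&b));
            return 0;
    }
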
@@ -370,8 +357,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
        xfs_buf_rele(bp);
 }
 
-#define xfs_bpin(bp)           xfs_buf_pin(bp)
-#define xfs_bunpin(bp)         xfs_buf_unpin(bp)
 #define xfs_biodone(bp)                xfs_buf_ioend(bp, 0)
 
 #define xfs_biomove(bp, off, len, data, rw) \
diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h
deleted file mode 100644 (file)
index a8b0b16..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_DMAPI_PRIV_H__
-#define __XFS_DMAPI_PRIV_H__
-
-/*
- *     Based on IO_ISDIRECT, decide which i_ flag is set.
- */
-#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
-                             DM_FLAGS_IMUX : 0)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
-
-#endif /*__XFS_DMAPI_PRIV_H__*/
index e7839ee49e43656d0d1edfa7011ce8d9eb0e97b2..3764d74790ecc5c8dc5301e36dd1605f5af045e8 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_export.h"
 #include "xfs_vnodeops.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
+#include "xfs_trace.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -132,8 +132,7 @@ xfs_nfs_get_inode(
         * fine and not an indication of a corrupted filesystem as clients can
         * send invalid file handles and we have to handle it gracefully..
         */
-       error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
-                        XFS_ILOCK_SHARED, &ip);
+       error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
        if (error) {
                /*
                 * EINVAL means the inode cluster doesn't exist anymore.
@@ -148,11 +147,10 @@ xfs_nfs_get_inode(
        }
 
        if (ip->i_d.di_gen != generation) {
-               xfs_iput_new(ip, XFS_ILOCK_SHARED);
+               IRELE(ip);
                return ERR_PTR(-ENOENT);
        }
 
-       xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return VFS_I(ip);
 }
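
The export hunks above make xfs_nfs_get_inode() do an untrusted lookup without holding the inode lock across the return, and release the reference with a plain IRELE() when the generation in the file handle no longer matches. A hedged userspace sketch of that lookup, verify-generation, release-on-mismatch shape, using an invented in-memory table:

    /*
     * Userspace sketch of an NFS-style handle lookup: find the inode by
     * number, verify the generation recorded in the handle, and drop the
     * reference on a mismatch.  The table and names are invented.
     */
    #include <stdio.h>

    struct inode {
            unsigned long long ino;
            unsigned int gen;
            int refcount;
    };

    static struct inode itable[] = {
            { .ino = 128, .gen = 3, .refcount = 0 },
    };

    static struct inode *iget(unsigned long long ino)
    {
            for (unsigned int i = 0; i < sizeof(itable) / sizeof(itable[0]); i++) {
                    if (itable[i].ino == ino) {
                            itable[i].refcount++;
                            return &itable[i];
                    }
            }
            return NULL;
    }

    static void irele(struct inode *ip)
    {
            ip->refcount--;
    }

    static struct inode *handle_to_inode(unsigned long long ino, unsigned int gen)
    {
            struct inode *ip = iget(ino);

            if (!ip)
                    return NULL;            /* handle points at a freed inode */
            if (ip->gen != gen) {
                    irele(ip);              /* like the IRELE() above */
                    return NULL;            /* inode number has been reused */
            }
            return ip;                      /* caller now owns a reference */
    }

    int main(void)
    {
            printf("gen 3 lookup: %p\n", (void *)handle_to_inode(128, 3));
            printf("gen 2 lookup: %p\n", (void *)handle_to_inode(128, 2));
            return 0;
    }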
 
index 257a56b127cf060013569a211de51efd7f2b1306..ba8ad422a16506fdea605c7a215e572b3b3a8f6a 100644 (file)
 #include "xfs_inum.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_trans.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_vnodeops.h"
 #include "xfs_da_btree.h"
 #include "xfs_ioctl.h"
@@ -108,7 +100,7 @@ xfs_file_fsync(
        int                     error = 0;
        int                     log_flushed = 0;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_file_fsync(ip);
 
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -XFS_ERROR(EIO);
@@ -166,8 +158,7 @@ xfs_file_fsync(
                 * transaction.  So we play it safe and fire off the
                 * transaction anyway.
                 */
-               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-               xfs_trans_ihold(tp, ip);
+               xfs_trans_ijoin(tp, ip);
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                xfs_trans_set_sync(tp);
                error = _xfs_trans_commit(tp, 0, &log_flushed);
@@ -275,20 +266,6 @@ xfs_file_aio_read(
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
-       if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
-               int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
-               int iolock = XFS_IOLOCK_SHARED;
-
-               ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
-                                       dmflags, &iolock);
-               if (ret) {
-                       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-                       if (unlikely(ioflags & IO_ISDIRECT))
-                               mutex_unlock(&inode->i_mutex);
-                       return ret;
-               }
-       }
-
        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (inode->i_mapping->nrpages) {
                        ret = -xfs_flushinval_pages(ip,
@@ -321,7 +298,6 @@ xfs_file_splice_read(
        unsigned int            flags)
 {
        struct xfs_inode        *ip = XFS_I(infilp->f_mapping->host);
-       struct xfs_mount        *mp = ip->i_mount;
        int                     ioflags = 0;
        ssize_t                 ret;
 
@@ -335,18 +311,6 @@ xfs_file_splice_read(
 
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
-       if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
-               int iolock = XFS_IOLOCK_SHARED;
-               int error;
-
-               error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
-                                       FILP_DELAY_FLAG(infilp), &iolock);
-               if (error) {
-                       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-                       return -error;
-               }
-       }
-
        trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
 
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
@@ -367,7 +331,6 @@ xfs_file_splice_write(
 {
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       struct xfs_mount        *mp = ip->i_mount;
        xfs_fsize_t             isize, new_size;
        int                     ioflags = 0;
        ssize_t                 ret;
@@ -382,18 +345,6 @@ xfs_file_splice_write(
 
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
-       if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
-               int iolock = XFS_IOLOCK_EXCL;
-               int error;
-
-               error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
-                                       FILP_DELAY_FLAG(outfilp), &iolock);
-               if (error) {
-                       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-                       return -error;
-               }
-       }
-
        new_size = *ppos + count;
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -463,7 +414,7 @@ xfs_zero_last_block(
        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
-                         &nimaps, NULL, NULL);
+                         &nimaps, NULL);
        if (error) {
                return error;
        }
@@ -558,7 +509,7 @@ xfs_zero_eof(
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
-                                 0, NULL, 0, &imap, &nimaps, NULL, NULL);
+                                 0, NULL, 0, &imap, &nimaps, NULL);
                if (error) {
                        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
@@ -627,7 +578,6 @@ xfs_file_aio_write(
        int                     ioflags = 0;
        xfs_fsize_t             isize, new_size;
        int                     iolock;
-       int                     eventsent = 0;
        size_t                  ocount = 0, count;
        int                     need_i_mutex;
 
@@ -673,33 +623,6 @@ start:
                goto out_unlock_mutex;
        }
 
-       if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
-           !(ioflags & IO_INVIS) && !eventsent)) {
-               int             dmflags = FILP_DELAY_FLAG(file);
-
-               if (need_i_mutex)
-                       dmflags |= DM_FLAGS_IMUX;
-
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-               error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
-                                     pos, count, dmflags, &iolock);
-               if (error) {
-                       goto out_unlock_internal;
-               }
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               eventsent = 1;
-
-               /*
-                * The iolock was dropped and reacquired in XFS_SEND_DATA
-                * so we have to recheck the size when appending.
-                * We will only "goto start;" once, since having sent the
-                * event prevents another call to XFS_SEND_DATA, which is
-                * what allows the size to change in the first place.
-                */
-               if ((file->f_flags & O_APPEND) && pos != ip->i_size)
-                       goto start;
-       }
-
        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
@@ -830,22 +753,6 @@ write_retry:
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
 
-       if (ret == -ENOSPC &&
-           DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
-               xfs_iunlock(ip, iolock);
-               if (need_i_mutex)
-                       mutex_unlock(&inode->i_mutex);
-               error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
-                               DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
-                               0, 0, 0); /* Delay flag intentionally  unused */
-               if (need_i_mutex)
-                       mutex_lock(&inode->i_mutex);
-               xfs_ilock(ip, iolock);
-               if (error)
-                       goto out_unlock_internal;
-               goto start;
-       }
-
        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;
@@ -1014,9 +921,6 @@ const struct file_operations xfs_file_operations = {
        .open           = xfs_file_open,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
-#ifdef HAVE_FOP_OPEN_EXEC
-       .open_exec      = xfs_file_open_exec,
-#endif
 };
 
 const struct file_operations xfs_dir_file_operations = {
index b6918d76bc7bbdf9740a07955c0e1694a6bf9825..1f279b012f94be7fd4ef2f5fbf084b89921804c1 100644 (file)
 #include "xfs_inode.h"
 #include "xfs_trace.h"
 
-int  fs_noerr(void) { return 0; }
-int  fs_nosys(void) { return ENOSYS; }
-void fs_noval(void) { return; }
-
 /*
  * note: all filemap functions return negative error codes. These
  * need to be inverted before returning to the xfs core functions.
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h
deleted file mode 100644 (file)
index 82bb19b..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef        __XFS_FS_SUBR_H__
-#define __XFS_FS_SUBR_H__
-
-extern int  fs_noerr(void);
-extern int  fs_nosys(void);
-extern void fs_noval(void);
-
-#endif /* __XFS_FS_SUBR_H__ */
index e59a8106283069b24bb9fb4b5f0ea5bec92d8ede..237f5ffb2ee8ca067f6dc81ebc38a6de1f7b36ce 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ioctl.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_bmap.h"
 #include "xfs_buf_item.h"
@@ -908,7 +899,7 @@ xfs_ioctl_setattr(
        struct xfs_dquot        *olddquot = NULL;
        int                     code;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_ioctl_setattr(ip);
 
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return XFS_ERROR(EROFS);
@@ -1043,8 +1034,7 @@ xfs_ioctl_setattr(
                }
        }
 
-       xfs_trans_ijoin(tp, ip, lock_flags);
-       xfs_trans_ihold(tp, ip);
+       xfs_trans_ijoin(tp, ip);
 
        /*
         * Change file ownership.  Must be the owner or privileged.
@@ -1116,16 +1106,7 @@ xfs_ioctl_setattr(
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
 
-       if (code)
-               return code;
-
-       if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) {
-               XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
-                               NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0,
-                               (mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0);
-       }
-
-       return 0;
+       return code;
 
  error_return:
        xfs_qm_dqrele(udqp);
@@ -1301,7 +1282,7 @@ xfs_file_ioctl(
        if (filp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_file_ioctl(ip);
 
        switch (cmd) {
        case XFS_IOC_ALLOCSP:
index 52ed49e6465cff0eee12844c13b169583cc1411c..6c83f7f62dc99ca29d5597aad61c97b432ec19ed 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_vnode.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
@@ -544,7 +540,7 @@ xfs_file_compat_ioctl(
        if (filp->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_file_compat_ioctl(ip);
 
        switch (cmd) {
        /* No size or alignment issues on any arch */
index 44f0b2de153eb6c4cf77b36c54278f36166d59ba..536b81e63a3d8e397d8e487192f938809ee26923 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
@@ -496,7 +488,7 @@ xfs_vn_getattr(
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_getattr(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
index facfb323a706a914de84de68b504762c82f05d6c..998a9d7fb9c8964d2174f62f935f6fd46b4f2042 100644 (file)
@@ -87,7 +87,6 @@
 #include <xfs_aops.h>
 #include <xfs_super.h>
 #include <xfs_globals.h>
-#include <xfs_fs_subr.h>
 #include <xfs_buf.h>
 
 /*
index 067cafbfc6357fee83a5adbf265e74ab93eeb999..bfd5ac9d1f6f774c49a58588ecc3d6f881bb04aa 100644 (file)
@@ -16,7 +16,6 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_dmapi.h"
 #include "xfs_sb.h"
 #include "xfs_inum.h"
 #include "xfs_log.h"
index 80938c736c2769861327ea10368cd0c20d4ceb51..758df94690edc7ab8663933b289b6c17e82d7289 100644 (file)
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
@@ -43,7 +40,6 @@
 #include "xfs_error.h"
 #include "xfs_itable.h"
 #include "xfs_fsops.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_utils.h"
@@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_BARRIER "barrier"       /* use writer barriers for log write and
                                         * unwritten extent conversion */
 #define MNTOPT_NOBARRIER "nobarrier"   /* .. disable */
-#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
 #define MNTOPT_64BITINODE   "inode64"  /* inodes can be allocated anywhere */
 #define MNTOPT_IKEEP   "ikeep"         /* do not free empty inode clusters */
 #define MNTOPT_NOIKEEP "noikeep"       /* free empty inode clusters */
@@ -116,9 +111,6 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
 #define MNTOPT_QUOTANOENF  "qnoenforce"        /* same as uqnoenforce */
-#define MNTOPT_DMAPI   "dmapi"         /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_XDSM    "xdsm"          /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_DMI     "dmi"           /* DMI enabled (DMAPI / XDSM) */
 #define MNTOPT_DELAYLOG   "delaylog"   /* Delayed loging enabled */
 #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed loging disabled */
 
@@ -172,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
 STATIC int
 xfs_parseargs(
        struct xfs_mount        *mp,
-       char                    *options,
-       char                    **mtpt)
+       char                    *options)
 {
        struct super_block      *sb = mp->m_super;
        char                    *this_char, *value, *eov;
        int                     dsunit = 0;
        int                     dswidth = 0;
        int                     iosize = 0;
-       int                     dmapi_implies_ikeep = 1;
        __uint8_t               iosizelog = 0;
 
        /*
@@ -243,15 +233,10 @@ xfs_parseargs(
                        if (!mp->m_logname)
                                return ENOMEM;
                } else if (!strcmp(this_char, MNTOPT_MTPT)) {
-                       if (!value || !*value) {
-                               cmn_err(CE_WARN,
-                                       "XFS: %s option requires an argument",
-                                       this_char);
-                               return EINVAL;
-                       }
-                       *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
-                       if (!*mtpt)
-                               return ENOMEM;
+                       cmn_err(CE_WARN,
+                               "XFS: %s option not allowed on this system",
+                               this_char);
+                       return EINVAL;
                } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
                        if (!value || !*value) {
                                cmn_err(CE_WARN,
@@ -288,8 +273,6 @@ xfs_parseargs(
                        mp->m_flags &= ~XFS_MOUNT_GRPID;
                } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
                        mp->m_flags |= XFS_MOUNT_WSYNC;
-               } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
-                       mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
                } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
                        mp->m_flags |= XFS_MOUNT_NORECOVERY;
                } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
@@ -329,7 +312,6 @@ xfs_parseargs(
                } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
                        mp->m_flags |= XFS_MOUNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
-                       dmapi_implies_ikeep = 0;
                        mp->m_flags &= ~XFS_MOUNT_IKEEP;
                } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
                        mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
@@ -370,12 +352,6 @@ xfs_parseargs(
                } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
                        mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
                        mp->m_qflags &= ~XFS_OQUOTA_ENFD;
-               } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
-                       mp->m_flags |= XFS_MOUNT_DMAPI;
-               } else if (!strcmp(this_char, MNTOPT_XDSM)) {
-                       mp->m_flags |= XFS_MOUNT_DMAPI;
-               } else if (!strcmp(this_char, MNTOPT_DMI)) {
-                       mp->m_flags |= XFS_MOUNT_DMAPI;
                } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
                        mp->m_flags |= XFS_MOUNT_DELAYLOG;
                        cmn_err(CE_WARN,
@@ -387,9 +363,11 @@ xfs_parseargs(
                        cmn_err(CE_WARN,
        "XFS: ihashsize no longer used, option is deprecated.");
                } else if (!strcmp(this_char, "osyncisdsync")) {
-                       /* no-op, this is now the default */
                        cmn_err(CE_WARN,
-       "XFS: osyncisdsync is now the default, option is deprecated.");
+       "XFS: osyncisdsync has no effect, option is deprecated.");
+               } else if (!strcmp(this_char, "osyncisosync")) {
+                       cmn_err(CE_WARN,
+       "XFS: osyncisosync has no effect, option is deprecated.");
                } else if (!strcmp(this_char, "irixsgid")) {
                        cmn_err(CE_WARN,
        "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
@@ -430,12 +408,6 @@ xfs_parseargs(
                return EINVAL;
        }
 
-       if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
-               printk("XFS: %s option needs the mount point option as well\n",
-                       MNTOPT_DMAPI);
-               return EINVAL;
-       }
-
        if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
                cmn_err(CE_WARN,
                        "XFS: sunit and swidth must be specified together");
@@ -449,18 +421,6 @@ xfs_parseargs(
                return EINVAL;
        }
 
-       /*
-        * Applications using DMI filesystems often expect the
-        * inode generation number to be monotonically increasing.
-        * If we delete inode chunks we break this assumption, so
-        * keep unused inode chunks on disk for DMI filesystems
-        * until we come up with a better solution.
-        * Note that if "ikeep" or "noikeep" mount options are
-        * supplied, then they are honored.
-        */
-       if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
-               mp->m_flags |= XFS_MOUNT_IKEEP;
-
 done:
        if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
                /*
@@ -539,10 +499,8 @@ xfs_showargs(
                { XFS_MOUNT_SWALLOC,            "," MNTOPT_SWALLOC },
                { XFS_MOUNT_NOUUID,             "," MNTOPT_NOUUID },
                { XFS_MOUNT_NORECOVERY,         "," MNTOPT_NORECOVERY },
-               { XFS_MOUNT_OSYNCISOSYNC,       "," MNTOPT_OSYNCISOSYNC },
                { XFS_MOUNT_ATTR2,              "," MNTOPT_ATTR2 },
                { XFS_MOUNT_FILESTREAMS,        "," MNTOPT_FILESTREAM },
-               { XFS_MOUNT_DMAPI,              "," MNTOPT_DMAPI },
                { XFS_MOUNT_GRPID,              "," MNTOPT_GRPID },
                { XFS_MOUNT_DELAYLOG,           "," MNTOPT_DELAYLOG },
                { 0, NULL }
@@ -947,7 +905,7 @@ xfs_fs_destroy_inode(
 {
        struct xfs_inode        *ip = XFS_I(inode);
 
-       xfs_itrace_entry(ip);
+       trace_xfs_destroy_inode(ip);
 
        XFS_STATS_INC(vn_reclaim);
 
@@ -1063,10 +1021,8 @@ xfs_log_inode(
         * an inode in another recent transaction.  So we play it safe and
         * fire off the transaction anyway.
         */
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       xfs_trans_ihold(tp, ip);
+       xfs_trans_ijoin(tp, ip);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);
        xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 
@@ -1082,27 +1038,18 @@ xfs_fs_write_inode(
        struct xfs_mount        *mp = ip->i_mount;
        int                     error = EAGAIN;
 
-       xfs_itrace_entry(ip);
+       trace_xfs_write_inode(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                /*
-                * Make sure the inode has hit stable storage.  By using the
-                * log and the fsync transactions we reduce the IOs we have
-                * to do here from two (log and inode) to just the log.
-                *
-                * Note: We still need to do a delwri write of the inode after
-                * this to flush it to the backing buffer so that bulkstat
-                * works properly if this is the first time the inode has been
-                * written.  Because we hold the ilock atomically over the
-                * transaction commit and the inode flush we are guaranteed
-                * that the inode is not pinned when it returns. If the flush
-                * lock is already held, then the inode has already been
-                * flushed once and we don't need to flush it again.  Hence
-                * the code will only flush the inode if it isn't already
-                * being flushed.
+                * Make sure the inode has made it into the log.  Instead
+                * of forcing it all the way to stable storage using a
+                * synchronous transaction we let the log force inside the
+                * ->sync_fs call do that for us, which reduces the number
+                * of synchronous log forces dramatically.
                 */
                xfs_ioend_wait(ip);
                xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -1116,27 +1063,29 @@ xfs_fs_write_inode(
                 * We make this non-blocking if the inode is contended, return
                 * EAGAIN to indicate to the caller that they did not succeed.
                 * This prevents the flush path from blocking on inodes inside
-                * another operation right now, they get caught later by xfs_sync.
+                * another operation right now, they get caught later by
+                * xfs_sync.
                 */
                if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
                        goto out;
-       }
 
-       if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
-               goto out_unlock;
+               if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+                       goto out_unlock;
 
-       /*
-        * Now we have the flush lock and the inode is not pinned, we can check
-        * if the inode is really clean as we know that there are no pending
-        * transaction completions, it is not waiting on the delayed write
-        * queue and there is no IO in progress.
-        */
-       if (xfs_inode_clean(ip)) {
-               xfs_ifunlock(ip);
-               error = 0;
-               goto out_unlock;
+               /*
+                * Now we have the flush lock and the inode is not pinned, we
+                * can check if the inode is really clean as we know that
+                * there are no pending transaction completions, it is not
+                * waiting on the delayed write queue and there is no IO in
+                * progress.
+                */
+               if (xfs_inode_clean(ip)) {
+                       xfs_ifunlock(ip);
+                       error = 0;
+                       goto out_unlock;
+               }
+               error = xfs_iflush(ip, 0);
        }
-       error = xfs_iflush(ip, 0);
 
  out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
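
The rewritten xfs_fs_write_inode() above keeps the opportunistic path non-blocking: trylock the inode, give up with EAGAIN if it is contended or pinned, and skip the flush entirely if it is already clean. A hedged sketch of that trylock-and-bail pattern with POSIX locks; the structure and names are invented:

    /*
     * Sketch of an opportunistic, non-blocking flush: if the object is
     * busy, return EAGAIN and let a later sync pass pick it up.
     */
    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    struct inode {
            pthread_mutex_t ilock;
            int pincount;
            int dirty;
    };

    static int write_inode_nonblocking(struct inode *ip)
    {
            int error = EAGAIN;

            if (pthread_mutex_trylock(&ip->ilock) != 0)
                    return EAGAIN;          /* contended: try again later */

            if (ip->pincount > 0)
                    goto out_unlock;        /* still pinned in the log */

            if (!ip->dirty) {
                    error = 0;              /* already clean, nothing to do */
                    goto out_unlock;
            }

            ip->dirty = 0;                  /* stand-in for the real flush */
            error = 0;

    out_unlock:
            pthread_mutex_unlock(&ip->ilock);
            return error;
    }

    int main(void)
    {
            struct inode ip = { .ilock = PTHREAD_MUTEX_INITIALIZER, .dirty = 1 };

            printf("first pass:  %d\n", write_inode_nonblocking(&ip));
            printf("second pass: %d\n", write_inode_nonblocking(&ip));
            return 0;
    }
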
@@ -1156,7 +1105,8 @@ xfs_fs_clear_inode(
 {
        xfs_inode_t             *ip = XFS_I(inode);
 
-       xfs_itrace_entry(ip);
+       trace_xfs_clear_inode(ip);
+
        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_DEC(vn_active);
@@ -1193,22 +1143,13 @@ xfs_fs_put_super(
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
+       /*
+        * Unregister the memory shrinker before we tear down the mount
+        * structure so we don't have memory reclaim racing with us here.
+        */
+       xfs_inode_shrinker_unregister(mp);
        xfs_syncd_stop(mp);
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               /*
-                * XXX(hch): this should be SYNC_WAIT.
-                *
-                * Or more likely not needed at all because the VFS is already
-                * calling ->sync_fs after shutting down all filestem
-                * operations and just before calling ->put_super.
-                */
-               xfs_sync_data(mp, 0);
-               xfs_sync_attr(mp, 0);
-       }
-
-       XFS_SEND_PREUNMOUNT(mp);
-
        /*
         * Blow away any referenced inode in the filestreams cache.
         * This can and will cause log traffic as inodes go inactive
@@ -1218,14 +1159,10 @@ xfs_fs_put_super(
 
        XFS_bflush(mp->m_ddev_targp);
 
-       XFS_SEND_UNMOUNT(mp);
-
        xfs_unmountfs(mp);
        xfs_freesb(mp);
-       xfs_inode_shrinker_unregister(mp);
        xfs_icsb_destroy_counters(mp);
        xfs_close_devices(mp);
-       xfs_dmops_put(mp);
        xfs_free_fsname(mp);
        kfree(mp);
 }
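
xfs_fs_put_super() above now unregisters the inode shrinker before any of the teardown, so memory reclaim cannot call back into a mount that is being torn down. A minimal sketch of that ordering rule, unregister the callback before freeing the state it references, using invented names:

    /*
     * Sketch of the teardown-ordering rule: a registered callback must be
     * unregistered before the state it uses is freed.
     */
    #include <stdlib.h>
    #include <stdio.h>

    struct mount_state {
            int cached_objects;
    };

    /* A single registration slot stands in for the shrinker list. */
    static struct mount_state *registered;

    static void shrinker_register(struct mount_state *m)  { registered = m; }
    static void shrinker_unregister(void)                 { registered = NULL; }

    static void reclaim_pass(void)
    {
            /* Reclaim only touches state that is still registered. */
            if (registered)
                    registered->cached_objects = 0;
    }

    int main(void)
    {
            struct mount_state *m = calloc(1, sizeof(*m));

            if (!m)
                    return 1;
            shrinker_register(m);
            m->cached_objects = 42;
            reclaim_pass();

            /* Unregister first, then free, mirroring the reordering above. */
            shrinker_unregister();
            free(m);

            reclaim_pass();         /* safe: no stale registration left */
            printf("teardown complete\n");
            return 0;
    }
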
@@ -1543,7 +1480,6 @@ xfs_fs_fill_super(
        struct inode            *root;
        struct xfs_mount        *mp = NULL;
        int                     flags = 0, error = ENOMEM;
-       char                    *mtpt = NULL;
 
        mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
        if (!mp)
@@ -1559,7 +1495,7 @@ xfs_fs_fill_super(
        mp->m_super = sb;
        sb->s_fs_info = mp;
 
-       error = xfs_parseargs(mp, (char *)data, &mtpt);
+       error = xfs_parseargs(mp, (char *)data);
        if (error)
                goto out_free_fsname;
 
@@ -1571,16 +1507,12 @@ xfs_fs_fill_super(
 #endif
        sb->s_op = &xfs_super_operations;
 
-       error = xfs_dmops_get(mp);
-       if (error)
-               goto out_free_fsname;
-
        if (silent)
                flags |= XFS_MFSI_QUIET;
 
        error = xfs_open_devices(mp);
        if (error)
-               goto out_put_dmops;
+               goto out_free_fsname;
 
        if (xfs_icsb_init_counters(mp))
                mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@@ -1608,8 +1540,6 @@ xfs_fs_fill_super(
        if (error)
                goto out_filestream_unmount;
 
-       XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
-
        sb->s_magic = XFS_SB_MAGIC;
        sb->s_blocksize = mp->m_sb.sb_blocksize;
        sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1638,7 +1568,6 @@ xfs_fs_fill_super(
 
        xfs_inode_shrinker_register(mp);
 
-       kfree(mtpt);
        return 0;
 
  out_filestream_unmount:
@@ -1648,11 +1577,8 @@ xfs_fs_fill_super(
  out_destroy_counters:
        xfs_icsb_destroy_counters(mp);
        xfs_close_devices(mp);
- out_put_dmops:
-       xfs_dmops_put(mp);
  out_free_fsname:
        xfs_free_fsname(mp);
-       kfree(mtpt);
        kfree(mp);
  out:
        return -error;
@@ -1759,6 +1685,12 @@ xfs_init_zones(void)
        if (!xfs_trans_zone)
                goto out_destroy_ifork_zone;
 
+       xfs_log_item_desc_zone =
+               kmem_zone_init(sizeof(struct xfs_log_item_desc),
+                              "xfs_log_item_desc");
+       if (!xfs_log_item_desc_zone)
+               goto out_destroy_trans_zone;
+
        /*
         * The size of the zone allocated buf log item is the maximum
         * size possible under XFS.  This wastes a little bit of memory,
@@ -1768,7 +1700,7 @@ xfs_init_zones(void)
                                (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
                                  NBWORD) * sizeof(int))), "xfs_buf_item");
        if (!xfs_buf_item_zone)
-               goto out_destroy_trans_zone;
+               goto out_destroy_log_item_desc_zone;
 
        xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
                        ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
@@ -1805,6 +1737,8 @@ xfs_init_zones(void)
        kmem_zone_destroy(xfs_efd_zone);
  out_destroy_buf_item_zone:
        kmem_zone_destroy(xfs_buf_item_zone);
+ out_destroy_log_item_desc_zone:
+       kmem_zone_destroy(xfs_log_item_desc_zone);
  out_destroy_trans_zone:
        kmem_zone_destroy(xfs_trans_zone);
  out_destroy_ifork_zone:
@@ -1835,6 +1769,7 @@ xfs_destroy_zones(void)
        kmem_zone_destroy(xfs_efi_zone);
        kmem_zone_destroy(xfs_efd_zone);
        kmem_zone_destroy(xfs_buf_item_zone);
+       kmem_zone_destroy(xfs_log_item_desc_zone);
        kmem_zone_destroy(xfs_trans_zone);
        kmem_zone_destroy(xfs_ifork_zone);
        kmem_zone_destroy(xfs_dabuf_zone);
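
Adding xfs_log_item_desc_zone threads one more label into the goto-based unwind chain in xfs_init_zones(), and xfs_destroy_zones() frees the zones in reverse order of creation. A tiny standalone sketch of that error-unwinding idiom with plain malloc/free:

    /*
     * Standalone sketch of goto-based error unwinding: each allocation adds
     * one label, failures free everything set up so far, newest first, and
     * teardown runs in reverse order of creation.
     */
    #include <stdlib.h>
    #include <stdio.h>

    static void *zone_a, *zone_b, *zone_c;

    static int init_zones(void)
    {
            zone_a = malloc(32);
            if (!zone_a)
                    goto out;

            zone_b = malloc(64);
            if (!zone_b)
                    goto out_free_a;

            zone_c = malloc(128);           /* the newly added zone */
            if (!zone_c)
                    goto out_free_b;

            return 0;

     out_free_b:
            free(zone_b);
     out_free_a:
            free(zone_a);
     out:
            return -1;
    }

    static void destroy_zones(void)
    {
            free(zone_c);
            free(zone_b);
            free(zone_a);
    }

    int main(void)
    {
            if (init_zones())
                    return 1;
            printf("zones initialised\n");
            destroy_zones();
            return 0;
    }
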
index 519618e9279eda5c4d81ca398679d835ff77df28..1ef4a4d2d99796caf7ea1be05fc1becb56846f16 100644 (file)
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
 # define XFS_BIGFS_STRING
 #endif
 
-#ifdef CONFIG_XFS_DMAPI
-# define XFS_DMAPI_STRING      "dmapi support, "
-#else
-# define XFS_DMAPI_STRING
-#endif
-
 #ifdef DEBUG
 # define XFS_DBG_STRING                "debug"
 #else
@@ -72,7 +66,6 @@ extern void xfs_qm_exit(void);
                                XFS_SECURITY_STRING \
                                XFS_REALTIME_STRING \
                                XFS_BIGFS_STRING \
-                               XFS_DMAPI_STRING \
                                XFS_DBG_STRING /* DBG must be last */
 
 struct xfs_inode;
index a51a07c3a70cfa8b5514add3dae137402bbc56b6..dfcbd98d15997e62e7d8a433fa71e5b8a9912609 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_inode.h"
 #include "xfs_dinode.h"
 #include "xfs_error.h"
-#include "xfs_mru_cache.h"
 #include "xfs_filestream.h"
 #include "xfs_vnodeops.h"
-#include "xfs_utils.h"
-#include "xfs_buf_item.h"
 #include "xfs_inode_item.h"
-#include "xfs_rw.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 
@@ -319,7 +308,7 @@ xfs_sync_inode_attr(
 /*
  * Write out pagecache data for the whole filesystem.
  */
-int
+STATIC int
 xfs_sync_data(
        struct xfs_mount        *mp,
        int                     flags)
@@ -340,7 +329,7 @@ xfs_sync_data(
 /*
  * Write out inode metadata (attributes) for the whole filesystem.
  */
-int
+STATIC int
 xfs_sync_attr(
        struct xfs_mount        *mp,
        int                     flags)
@@ -373,8 +362,7 @@ xfs_commit_dummy_trans(
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
 
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       xfs_trans_ihold(tp, ip);
+       xfs_trans_ijoin(tp, ip);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        error = xfs_trans_commit(tp, 0);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -867,7 +855,36 @@ out:
 reclaim:
        xfs_ifunlock(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       xfs_ireclaim(ip);
+
+       XFS_STATS_INC(xs_ig_reclaims);
+       /*
+        * Remove the inode from the per-AG radix tree.
+        *
+        * Because radix_tree_delete won't complain even if the item was never
+        * added to the tree, assert that it's been there before to catch
+        * problems with the inode lifetime early on.
+        */
+       write_lock(&pag->pag_ici_lock);
+       if (!radix_tree_delete(&pag->pag_ici_root,
+                               XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
+               ASSERT(0);
+       write_unlock(&pag->pag_ici_lock);
+
+       /*
+        * Here we do an (almost) spurious inode lock in order to coordinate
+        * with inode cache radix tree lookups.  This is because the lookup
+        * can reference the inodes in the cache without taking references.
+        *
+        * We make that OK here by ensuring that we wait until the inode is
+        * unlocked after the lookup before we go ahead and free it.  We get
+        * both the ilock and the iolock because the code may need to drop the
+        * ilock but will still hold the iolock.
+        */
+       xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       xfs_qm_dqdetach(ip);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+       xfs_inode_free(ip);
        return error;
 
 }
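
The reclaim path above is now open-coded: delete the inode from the per-AG radix tree under the write lock, cycle the inode locks so any lookup that found the inode without taking a reference has finished with it, and only then free it. A hedged single-threaded sketch of those steps with POSIX locks; the structures are invented and this is not the XFS code:

    /*
     * Sketch of the reclaim steps: remove the object from the lookup
     * structure under its lock, cycle the object lock so any lookup that
     * found it has finished, then free it.
     */
    #include <pthread.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct object {
            pthread_mutex_t ilock;          /* taken by concurrent lookups */
            unsigned long long ino;
    };

    struct cache {
            pthread_rwlock_t lock;          /* protects the lookup slot */
            struct object *slot;            /* stand-in for the radix tree */
    };

    static void reclaim(struct cache *c, struct object *ip)
    {
            /* 1. Remove the object from the lookup structure. */
            pthread_rwlock_wrlock(&c->lock);
            if (c->slot != ip)
                    abort();                /* like the ASSERT(0) above */
            c->slot = NULL;
            pthread_rwlock_unlock(&c->lock);

            /* 2. Cycle the object lock so a lookup that grabbed the pointer
             *    before step 1 has dropped it before the memory goes away. */
            pthread_mutex_lock(&ip->ilock);
            pthread_mutex_unlock(&ip->ilock);

            /* 3. Now it is safe to free. */
            pthread_mutex_destroy(&ip->ilock);
            free(ip);
    }

    int main(void)
    {
            struct cache c;
            struct object *ip = malloc(sizeof(*ip));

            if (!ip)
                    return 1;
            pthread_rwlock_init(&c.lock, NULL);
            pthread_mutex_init(&ip->ilock, NULL);
            ip->ino = 128;
            c.slot = ip;

            reclaim(&c, ip);
            pthread_rwlock_destroy(&c.lock);
            printf("reclaimed\n");
            return 0;
    }
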
index e28139aaa4aa42c6085100856345e0156a33eef3..fe78726196f8200cd777298e82560f12f180e23c 100644 (file)
@@ -35,9 +35,6 @@ typedef struct xfs_sync_work {
 int xfs_syncd_init(struct xfs_mount *mp);
 void xfs_syncd_stop(struct xfs_mount *mp);
 
-int xfs_sync_attr(struct xfs_mount *mp, int flags);
-int xfs_sync_data(struct xfs_mount *mp, int flags);
-
 int xfs_quiesce_data(struct xfs_mount *mp);
 void xfs_quiesce_attr(struct xfs_mount *mp);
 
index d12be8470cbac741dcdc42ba402cb2e228dd93a2..88d25d4aa56ec34b0a9039af8d52d567c478a429 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
index 302820690904d1eff4de201ca97ca97a5fad4c9d..c657cdca2cd2648ac173332556bc371ffb577847 100644 (file)
@@ -317,8 +317,6 @@ DEFINE_BUF_EVENT(xfs_buf_init);
 DEFINE_BUF_EVENT(xfs_buf_free);
 DEFINE_BUF_EVENT(xfs_buf_hold);
 DEFINE_BUF_EVENT(xfs_buf_rele);
-DEFINE_BUF_EVENT(xfs_buf_pin);
-DEFINE_BUF_EVENT(xfs_buf_unpin);
 DEFINE_BUF_EVENT(xfs_buf_iodone);
 DEFINE_BUF_EVENT(xfs_buf_iorequest);
 DEFINE_BUF_EVENT(xfs_buf_bawrite);
@@ -541,7 +539,7 @@ DEFINE_LOCK_EVENT(xfs_ilock_nowait);
 DEFINE_LOCK_EVENT(xfs_ilock_demote);
 DEFINE_LOCK_EVENT(xfs_iunlock);
 
-DECLARE_EVENT_CLASS(xfs_iget_class,
+DECLARE_EVENT_CLASS(xfs_inode_class,
        TP_PROTO(struct xfs_inode *ip),
        TP_ARGS(ip),
        TP_STRUCT__entry(
@@ -557,16 +555,38 @@ DECLARE_EVENT_CLASS(xfs_iget_class,
                  __entry->ino)
 )
 
-#define DEFINE_IGET_EVENT(name) \
-DEFINE_EVENT(xfs_iget_class, name, \
+#define DEFINE_INODE_EVENT(name) \
+DEFINE_EVENT(xfs_inode_class, name, \
        TP_PROTO(struct xfs_inode *ip), \
        TP_ARGS(ip))
-DEFINE_IGET_EVENT(xfs_iget_skip);
-DEFINE_IGET_EVENT(xfs_iget_reclaim);
-DEFINE_IGET_EVENT(xfs_iget_found);
-DEFINE_IGET_EVENT(xfs_iget_alloc);
-
-DECLARE_EVENT_CLASS(xfs_inode_class,
+DEFINE_INODE_EVENT(xfs_iget_skip);
+DEFINE_INODE_EVENT(xfs_iget_reclaim);
+DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
+DEFINE_INODE_EVENT(xfs_iget_hit);
+DEFINE_INODE_EVENT(xfs_iget_miss);
+
+DEFINE_INODE_EVENT(xfs_getattr);
+DEFINE_INODE_EVENT(xfs_setattr);
+DEFINE_INODE_EVENT(xfs_readlink);
+DEFINE_INODE_EVENT(xfs_alloc_file_space);
+DEFINE_INODE_EVENT(xfs_free_file_space);
+DEFINE_INODE_EVENT(xfs_readdir);
+#ifdef CONFIG_XFS_POSIX_ACL
+DEFINE_INODE_EVENT(xfs_check_acl);
+#endif
+DEFINE_INODE_EVENT(xfs_vm_bmap);
+DEFINE_INODE_EVENT(xfs_file_ioctl);
+DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
+DEFINE_INODE_EVENT(xfs_ioctl_setattr);
+DEFINE_INODE_EVENT(xfs_file_fsync);
+DEFINE_INODE_EVENT(xfs_destroy_inode);
+DEFINE_INODE_EVENT(xfs_write_inode);
+DEFINE_INODE_EVENT(xfs_clear_inode);
+
+DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
+DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
+
+DECLARE_EVENT_CLASS(xfs_iref_class,
        TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
        TP_ARGS(ip, caller_ip),
        TP_STRUCT__entry(
@@ -591,20 +611,71 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
                  (char *)__entry->caller_ip)
 )
 
-#define DEFINE_INODE_EVENT(name) \
-DEFINE_EVENT(xfs_inode_class, name, \
+#define DEFINE_IREF_EVENT(name) \
+DEFINE_EVENT(xfs_iref_class, name, \
        TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
        TP_ARGS(ip, caller_ip))
-DEFINE_INODE_EVENT(xfs_ihold);
-DEFINE_INODE_EVENT(xfs_irele);
-DEFINE_INODE_EVENT(xfs_inode_pin);
-DEFINE_INODE_EVENT(xfs_inode_unpin);
-DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
+DEFINE_IREF_EVENT(xfs_ihold);
+DEFINE_IREF_EVENT(xfs_irele);
+DEFINE_IREF_EVENT(xfs_inode_pin);
+DEFINE_IREF_EVENT(xfs_inode_unpin);
+DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
+
+DECLARE_EVENT_CLASS(xfs_namespace_class,
+       TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
+       TP_ARGS(dp, name),
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(xfs_ino_t, dp_ino)
+               __dynamic_array(char, name, name->len)
+       ),
+       TP_fast_assign(
+               __entry->dev = VFS_I(dp)->i_sb->s_dev;
+               __entry->dp_ino = dp->i_ino;
+               memcpy(__get_str(name), name->name, name->len);
+       ),
+       TP_printk("dev %d:%d dp ino 0x%llx name %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->dp_ino,
+                 __get_str(name))
+)
 
-/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
-DEFINE_INODE_EVENT(xfs_inode);
-#define xfs_itrace_entry(ip)    \
-       trace_xfs_inode(ip, _THIS_IP_)
+#define DEFINE_NAMESPACE_EVENT(name) \
+DEFINE_EVENT(xfs_namespace_class, name, \
+       TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
+       TP_ARGS(dp, name))
+DEFINE_NAMESPACE_EVENT(xfs_remove);
+DEFINE_NAMESPACE_EVENT(xfs_link);
+DEFINE_NAMESPACE_EVENT(xfs_lookup);
+DEFINE_NAMESPACE_EVENT(xfs_create);
+DEFINE_NAMESPACE_EVENT(xfs_symlink);
+
+TRACE_EVENT(xfs_rename,
+       TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
+                struct xfs_name *src_name, struct xfs_name *target_name),
+       TP_ARGS(src_dp, target_dp, src_name, target_name),
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(xfs_ino_t, src_dp_ino)
+               __field(xfs_ino_t, target_dp_ino)
+               __dynamic_array(char, src_name, src_name->len)
+               __dynamic_array(char, target_name, target_name->len)
+       ),
+       TP_fast_assign(
+               __entry->dev = VFS_I(src_dp)->i_sb->s_dev;
+               __entry->src_dp_ino = src_dp->i_ino;
+               __entry->target_dp_ino = target_dp->i_ino;
+               memcpy(__get_str(src_name), src_name->name, src_name->len);
+               memcpy(__get_str(target_name), target_name->name, target_name->len);
+       ),
+       TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
+                 " src name %s target name %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->src_dp_ino,
+                 __entry->target_dp_ino,
+                 __get_str(src_name),
+                 __get_str(target_name))
+)
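
The trace hunks above fold the old iget/itrace events into one xfs_inode_class and stamp out many per-call-site events from it with DEFINE_INODE_EVENT(). A rough userspace sketch of that one-class, many-thin-wrappers macro pattern, using plain printf rather than the kernel tracepoint machinery:

    /*
     * Sketch of the one-class, many-events pattern: a single formatting
     * routine plus a macro that generates a thin wrapper per event.
     */
    #include <stdio.h>

    struct inode_args {
            unsigned long long ino;
    };

    /* The "class": one shared formatter for every event of this shape. */
    static void trace_inode_class(const char *event, const struct inode_args *a)
    {
            printf("%s: ino 0x%llx\n", event, a->ino);
    }

    /* Each "event" is a wrapper generated from the class. */
    #define DEFINE_INODE_EVENT(name)                                \
            static void trace_##name(const struct inode_args *a)    \
            {                                                       \
                    trace_inode_class(#name, a);                    \
            }

    DEFINE_INODE_EVENT(xfs_getattr)
    DEFINE_INODE_EVENT(xfs_setattr)

    int main(void)
    {
            struct inode_args a = { .ino = 0x85 };

            trace_xfs_getattr(&a);
            trace_xfs_setattr(&a);
            return 0;
    }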
 
 DECLARE_EVENT_CLASS(xfs_dquot_class,
        TP_PROTO(struct xfs_dquot *dqp),
@@ -684,9 +755,6 @@ DEFINE_DQUOT_EVENT(xfs_dqrele);
 DEFINE_DQUOT_EVENT(xfs_dqflush);
 DEFINE_DQUOT_EVENT(xfs_dqflush_force);
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);
-/* not really iget events, but we re-use the format */
-DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
-DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
 
 DECLARE_EVENT_CLASS(xfs_loggrant_class,
        TP_PROTO(struct log *log, struct xlog_ticket *tic),
@@ -834,33 +902,29 @@ DECLARE_EVENT_CLASS(xfs_page_class,
                __field(loff_t, size)
                __field(unsigned long, offset)
                __field(int, delalloc)
-               __field(int, unmapped)
                __field(int, unwritten)
        ),
        TP_fast_assign(
-               int delalloc = -1, unmapped = -1, unwritten = -1;
+               int delalloc = -1, unwritten = -1;
 
                if (page_has_buffers(page))
-                       xfs_count_page_state(page, &delalloc,
-                                            &unmapped, &unwritten);
+                       xfs_count_page_state(page, &delalloc, &unwritten);
                __entry->dev = inode->i_sb->s_dev;
                __entry->ino = XFS_I(inode)->i_ino;
                __entry->pgoff = page_offset(page);
                __entry->size = i_size_read(inode);
                __entry->offset = off;
                __entry->delalloc = delalloc;
-               __entry->unmapped = unmapped;
                __entry->unwritten = unwritten;
        ),
        TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
-                 "delalloc %d unmapped %d unwritten %d",
+                 "delalloc %d unwritten %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->pgoff,
                  __entry->size,
                  __entry->offset,
                  __entry->delalloc,
-                 __entry->unmapped,
                  __entry->unwritten)
 )
 
index 585e7633dfc75b176df3989186628385b857bf8e..e1a2f6800e01da339586080d23b6a760747a4725 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_bmap.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
@@ -64,8 +54,6 @@
    flush lock - ditto.
 */
 
-STATIC void            xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
-
 #ifdef DEBUG
 xfs_buftarg_t *xfs_dqerror_target;
 int xfs_do_dqerror;
@@ -390,21 +378,14 @@ xfs_qm_dqalloc(
                return (ESRCH);
        }
 
-       /*
-        * xfs_trans_commit normally decrements the vnode ref count
-        * when it unlocks the inode. Since we want to keep the quota
-        * inode around, we bump the vnode ref count now.
-        */
-       IHOLD(quotip);
-
-       xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
        nmaps = 1;
        if ((error = xfs_bmapi(tp, quotip,
                              offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
                              XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
                              &firstblock,
                              XFS_QM_DQALLOC_SPACE_RES(mp),
-                             &map, &nmaps, &flist, NULL))) {
+                             &map, &nmaps, &flist))) {
                goto error0;
        }
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -520,7 +501,7 @@ xfs_qm_dqtobp(
                error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
                                  XFS_DQUOT_CLUSTER_SIZE_FSB,
                                  XFS_BMAPI_METADATA,
-                                 NULL, 0, &map, &nmaps, NULL, NULL);
+                                 NULL, 0, &map, &nmaps, NULL);
 
                xfs_iunlock(quotip, XFS_ILOCK_SHARED);
                if (error)
@@ -1141,6 +1122,46 @@ xfs_qm_dqrele(
        xfs_qm_dqput(dqp);
 }
 
+/*
+ * This is the dquot flushing I/O completion routine.  It is called
+ * from interrupt level when the buffer containing the dquot is
+ * flushed to disk.  It is responsible for removing the dquot logitem
+ * from the AIL if it has not been re-logged, and unlocking the dquot's
+ * flush lock. This behavior is very similar to that of inodes.
+ */
+STATIC void
+xfs_qm_dqflush_done(
+       struct xfs_buf          *bp,
+       struct xfs_log_item     *lip)
+{
+       xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
+       xfs_dquot_t             *dqp = qip->qli_dquot;
+       struct xfs_ail          *ailp = lip->li_ailp;
+
+       /*
+        * We only want to pull the item from the AIL if its
+        * location in the log has not changed since we started the flush.
+        * Thus, we only bother if the dquot's lsn has
+        * not changed. First we check the lsn outside the lock
+        * since it's cheaper, and then we recheck while
+        * holding the lock before removing the dquot from the AIL.
+        */
+       if ((lip->li_flags & XFS_LI_IN_AIL) &&
+           lip->li_lsn == qip->qli_flush_lsn) {
+
+               /* xfs_trans_ail_delete() drops the AIL lock. */
+               spin_lock(&ailp->xa_lock);
+               if (lip->li_lsn == qip->qli_flush_lsn)
+                       xfs_trans_ail_delete(ailp, lip);
+               else
+                       spin_unlock(&ailp->xa_lock);
+       }
+
+       /*
+        * Release the dq's flush lock since we're done with it.
+        */
+       xfs_dqfunlock(dqp);
+}
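
The relocated xfs_qm_dqflush_done() keeps the cheap unlocked LSN comparison and only takes the AIL lock to recheck before deleting the item. A hedged sketch of that check-then-recheck-under-the-lock pattern, with a pthread mutex standing in for the AIL spinlock and invented names:

    /*
     * Sketch of "check without the lock, recheck with it": the unlocked
     * test is cheap and usually lets us skip the lock, the locked recheck
     * makes the removal decision safe.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct item {
            bool on_list;
            long long lsn;          /* position recorded when the flush began */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void flush_done(struct item *it, long long flush_lsn)
    {
            /* Unlocked test first: skip the lock if the item has clearly
             * been re-logged since the flush started. */
            if (it->on_list && it->lsn == flush_lsn) {
                    pthread_mutex_lock(&list_lock);
                    /* Recheck under the lock before removing the item. */
                    if (it->lsn == flush_lsn)
                            it->on_list = false;
                    pthread_mutex_unlock(&list_lock);
            }
            /* ...then drop the flush lock, as xfs_dqfunlock() does above. */
    }

    int main(void)
    {
            struct item it = { .on_list = true, .lsn = 100 };

            flush_done(&it, 100);
            printf("still on list: %d\n", it.on_list);
            return 0;
    }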
 
 /*
  * Write a modified dquot to disk.
@@ -1222,8 +1243,9 @@ xfs_qm_dqflush(
         * Attach an iodone routine so that we can remove this dquot from the
         * AIL and release the flush lock once the dquot is synced to disk.
         */
-       xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
-                             xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));
+       xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
+                                 &dqp->q_logitem.qli_item);
+
        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
@@ -1247,50 +1269,6 @@ xfs_qm_dqflush(
 
 }
 
-/*
- * This is the dquot flushing I/O completion routine.  It is called
- * from interrupt level when the buffer containing the dquot is
- * flushed to disk.  It is responsible for removing the dquot logitem
- * from the AIL if it has not been re-logged, and unlocking the dquot's
- * flush lock. This behavior is very similar to that of inodes..
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_dqflush_done(
-       xfs_buf_t               *bp,
-       xfs_dq_logitem_t        *qip)
-{
-       xfs_dquot_t             *dqp;
-       struct xfs_ail          *ailp;
-
-       dqp = qip->qli_dquot;
-       ailp = qip->qli_item.li_ailp;
-
-       /*
-        * We only want to pull the item from the AIL if its
-        * location in the log has not changed since we started the flush.
-        * Thus, we only bother if the dquot's lsn has
-        * not changed. First we check the lsn outside the lock
-        * since it's cheaper, and then we recheck while
-        * holding the lock before removing the dquot from the AIL.
-        */
-       if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
-           qip->qli_item.li_lsn == qip->qli_flush_lsn) {
-
-               /* xfs_trans_ail_delete() drops the AIL lock. */
-               spin_lock(&ailp->xa_lock);
-               if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
-                       xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
-               else
-                       spin_unlock(&ailp->xa_lock);
-       }
-
-       /*
-        * Release the dq's flush lock since we're done with it.
-        */
-       xfs_dqfunlock(dqp);
-}
-
 int
 xfs_qm_dqlock_nowait(
        xfs_dquot_t *dqp)
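The new completion handler added above relies on a common lock-avoidance idiom: sample the item's flush LSN, test it outside the AIL lock because that test is cheap and usually tells us the item was re-logged, and only take the lock to recheck and delete when the first test passes. The standalone C sketch below models just that idiom with invented types (item, tracker, complete_flush); it is an illustration of the pattern, not XFS code.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct item {
	uint64_t	lsn;		/* last-logged sequence number */
	bool		on_list;	/* analogue of XFS_LI_IN_AIL */
};

struct tracker {
	pthread_mutex_t	lock;		/* analogue of ailp->xa_lock */
};

/* Remove @it from the list if it has not been re-logged since @flush_lsn. */
static void complete_flush(struct tracker *t, struct item *it, uint64_t flush_lsn)
{
	/* Cheap test without the lock: usually the item has moved on. */
	if (it->on_list && it->lsn == flush_lsn) {
		pthread_mutex_lock(&t->lock);
		/* Recheck under the lock; this is the check that matters. */
		if (it->lsn == flush_lsn)
			it->on_list = false;	/* xfs_trans_ail_delete() stand-in */
		pthread_mutex_unlock(&t->lock);
	}
}

int main(void)
{
	struct tracker	t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct item	it = { .lsn = 7, .on_list = true };

	complete_flush(&t, &it, 7);
	return it.on_list ? 1 : 0;	/* 0: the item was pulled off the list */
}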
index 8d89a24ae3244d1cd2ce75bcf22d37429d5f2ced..2a1f3dc10a02dba4f401a326fd72f07bd40212af 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
 
+static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
+{
+       return container_of(lip, struct xfs_dq_logitem, qli_item);
+}
+
 /*
  * returns the number of iovecs needed to log the given dquot item.
  */
-/* ARGSUSED */
 STATIC uint
 xfs_qm_dquot_logitem_size(
-       xfs_dq_logitem_t        *logitem)
+       struct xfs_log_item     *lip)
 {
        /*
         * we need only two iovecs, one for the format, one for the real thing
         */
-       return (2);
+       return 2;
 }
 
 /*
@@ -66,22 +60,21 @@ xfs_qm_dquot_logitem_size(
  */
 STATIC void
 xfs_qm_dquot_logitem_format(
-       xfs_dq_logitem_t        *logitem,
-       xfs_log_iovec_t         *logvec)
+       struct xfs_log_item     *lip,
+       struct xfs_log_iovec    *logvec)
 {
-       ASSERT(logitem);
-       ASSERT(logitem->qli_dquot);
+       struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
 
-       logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
+       logvec->i_addr = &qlip->qli_format;
        logvec->i_len  = sizeof(xfs_dq_logformat_t);
        logvec->i_type = XLOG_REG_TYPE_QFORMAT;
        logvec++;
-       logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
+       logvec->i_addr = &qlip->qli_dquot->q_core;
        logvec->i_len  = sizeof(xfs_disk_dquot_t);
        logvec->i_type = XLOG_REG_TYPE_DQUOT;
 
-       ASSERT(2 == logitem->qli_item.li_desc->lid_size);
-       logitem->qli_format.qlf_size = 2;
+       ASSERT(2 == lip->li_desc->lid_size);
+       qlip->qli_format.qlf_size = 2;
 
 }
 
@@ -90,9 +83,9 @@ xfs_qm_dquot_logitem_format(
  */
 STATIC void
 xfs_qm_dquot_logitem_pin(
-       xfs_dq_logitem_t *logitem)
+       struct xfs_log_item     *lip)
 {
-       xfs_dquot_t *dqp = logitem->qli_dquot;
+       struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
 
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        atomic_inc(&dqp->q_pincount);
@@ -104,27 +97,18 @@ xfs_qm_dquot_logitem_pin(
  * dquot must have been previously pinned with a call to
  * xfs_qm_dquot_logitem_pin().
  */
-/* ARGSUSED */
 STATIC void
 xfs_qm_dquot_logitem_unpin(
-       xfs_dq_logitem_t *logitem)
+       struct xfs_log_item     *lip,
+       int                     remove)
 {
-       xfs_dquot_t *dqp = logitem->qli_dquot;
+       struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
 
        ASSERT(atomic_read(&dqp->q_pincount) > 0);
        if (atomic_dec_and_test(&dqp->q_pincount))
                wake_up(&dqp->q_pinwait);
 }
 
-/* ARGSUSED */
-STATIC void
-xfs_qm_dquot_logitem_unpin_remove(
-       xfs_dq_logitem_t *logitem,
-       xfs_trans_t      *tp)
-{
-       xfs_qm_dquot_logitem_unpin(logitem);
-}
-
 /*
  * Given the logitem, this writes the corresponding dquot entry to disk
  * asynchronously. This is called with the dquot entry securely locked;
@@ -133,12 +117,10 @@ xfs_qm_dquot_logitem_unpin_remove(
  */
 STATIC void
 xfs_qm_dquot_logitem_push(
-       xfs_dq_logitem_t        *logitem)
+       struct xfs_log_item     *lip)
 {
-       xfs_dquot_t     *dqp;
-       int             error;
-
-       dqp = logitem->qli_dquot;
+       struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
+       int                     error;
 
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));
@@ -160,27 +142,25 @@ xfs_qm_dquot_logitem_push(
        xfs_dqunlock(dqp);
 }
 
-/*ARGSUSED*/
 STATIC xfs_lsn_t
 xfs_qm_dquot_logitem_committed(
-       xfs_dq_logitem_t        *l,
+       struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
 {
        /*
         * We always re-log the entire dquot when it becomes dirty,
         * so, the latest copy _is_ the only one that matters.
         */
-       return (lsn);
+       return lsn;
 }
 
-
 /*
  * This is called to wait for the given dquot to be unpinned.
  * Most of these pin/unpin routines are plagiarized from inode code.
  */
 void
 xfs_qm_dqunpin_wait(
-       xfs_dquot_t     *dqp)
+       struct xfs_dquot        *dqp)
 {
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        if (atomic_read(&dqp->q_pincount) == 0)
@@ -206,13 +186,12 @@ xfs_qm_dqunpin_wait(
  */
 STATIC void
 xfs_qm_dquot_logitem_pushbuf(
-       xfs_dq_logitem_t    *qip)
+       struct xfs_log_item     *lip)
 {
-       xfs_dquot_t     *dqp;
-       xfs_mount_t     *mp;
-       xfs_buf_t       *bp;
+       struct xfs_dq_logitem   *qlip = DQUOT_ITEM(lip);
+       struct xfs_dquot        *dqp = qlip->qli_dquot;
+       struct xfs_buf          *bp;
 
-       dqp = qip->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
        /*
@@ -220,22 +199,20 @@ xfs_qm_dquot_logitem_pushbuf(
         * inode flush completed and the inode was taken off the AIL.
         * So, just get out.
         */
-       if (completion_done(&dqp->q_flush)  ||
-           ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+       if (completion_done(&dqp->q_flush) ||
+           !(lip->li_flags & XFS_LI_IN_AIL)) {
                xfs_dqunlock(dqp);
                return;
        }
-       mp = dqp->q_mount;
-       bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
-                       mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
+
+       bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
+                       dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
        xfs_dqunlock(dqp);
        if (!bp)
                return;
        if (XFS_BUF_ISDELAYWRITE(bp))
                xfs_buf_delwri_promote(bp);
        xfs_buf_relse(bp);
-       return;
-
 }
 
 /*
@@ -250,15 +227,14 @@ xfs_qm_dquot_logitem_pushbuf(
  */
 STATIC uint
 xfs_qm_dquot_logitem_trylock(
-       xfs_dq_logitem_t        *qip)
+       struct xfs_log_item     *lip)
 {
-       xfs_dquot_t             *dqp;
+       struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
 
-       dqp = qip->qli_dquot;
        if (atomic_read(&dqp->q_pincount) > 0)
                return XFS_ITEM_PINNED;
 
-       if (! xfs_qm_dqlock_nowait(dqp))
+       if (!xfs_qm_dqlock_nowait(dqp))
                return XFS_ITEM_LOCKED;
 
        if (!xfs_dqflock_nowait(dqp)) {
@@ -269,11 +245,10 @@ xfs_qm_dquot_logitem_trylock(
                return XFS_ITEM_PUSHBUF;
        }
 
-       ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
+       ASSERT(lip->li_flags & XFS_LI_IN_AIL);
        return XFS_ITEM_SUCCESS;
 }
 
-
 /*
  * Unlock the dquot associated with the log item.
  * Clear the fields of the dquot and dquot log item that
@@ -282,12 +257,10 @@ xfs_qm_dquot_logitem_trylock(
  */
 STATIC void
 xfs_qm_dquot_logitem_unlock(
-       xfs_dq_logitem_t    *ql)
+       struct xfs_log_item     *lip)
 {
-       xfs_dquot_t     *dqp;
+       struct xfs_dquot        *dqp = DQUOT_ITEM(lip)->qli_dquot;
 
-       ASSERT(ql != NULL);
-       dqp = ql->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
        /*
@@ -304,43 +277,32 @@ xfs_qm_dquot_logitem_unlock(
        xfs_dqunlock(dqp);
 }
 
-
 /*
  * this needs to stamp an lsn into the dquot, I think.
  * rpc's that look at user dquot's would then have to
  * push on the dependency recorded in the dquot
  */
-/* ARGSUSED */
 STATIC void
 xfs_qm_dquot_logitem_committing(
-       xfs_dq_logitem_t        *l,
+       struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
 {
-       return;
 }
 
-
 /*
  * This is the ops vector for dquots
  */
 static struct xfs_item_ops xfs_dquot_item_ops = {
-       .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
-       .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
-                                       xfs_qm_dquot_logitem_format,
-       .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
-       .iop_unpin      = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
-       .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
-                                       xfs_qm_dquot_logitem_unpin_remove,
-       .iop_trylock    = (uint(*)(xfs_log_item_t*))
-                                       xfs_qm_dquot_logitem_trylock,
-       .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
-       .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_dquot_logitem_committed,
-       .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
-       .iop_pushbuf    = (void(*)(xfs_log_item_t*))
-                                       xfs_qm_dquot_logitem_pushbuf,
-       .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_dquot_logitem_committing
+       .iop_size       = xfs_qm_dquot_logitem_size,
+       .iop_format     = xfs_qm_dquot_logitem_format,
+       .iop_pin        = xfs_qm_dquot_logitem_pin,
+       .iop_unpin      = xfs_qm_dquot_logitem_unpin,
+       .iop_trylock    = xfs_qm_dquot_logitem_trylock,
+       .iop_unlock     = xfs_qm_dquot_logitem_unlock,
+       .iop_committed  = xfs_qm_dquot_logitem_committed,
+       .iop_push       = xfs_qm_dquot_logitem_push,
+       .iop_pushbuf    = xfs_qm_dquot_logitem_pushbuf,
+       .iop_committing = xfs_qm_dquot_logitem_committing
 };
 
 /*
@@ -350,10 +312,9 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
  */
 void
 xfs_qm_dquot_logitem_init(
-       struct xfs_dquot *dqp)
+       struct xfs_dquot        *dqp)
 {
-       xfs_dq_logitem_t  *lp;
-       lp = &dqp->q_logitem;
+       struct xfs_dq_logitem   *lp = &dqp->q_logitem;
 
        xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
                                        &xfs_dquot_item_ops);
@@ -374,16 +335,22 @@ xfs_qm_dquot_logitem_init(
 
 /*------------------  QUOTAOFF LOG ITEMS  -------------------*/
 
+static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
+{
+       return container_of(lip, struct xfs_qoff_logitem, qql_item);
+}
+
+
 /*
  * This returns the number of iovecs needed to log the given quotaoff item.
  * We only need 1 iovec for a quotaoff item.  It just logs the
  * quotaoff_log_format structure.
  */
-/*ARGSUSED*/
 STATIC uint
-xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_size(
+       struct xfs_log_item     *lip)
 {
-       return (1);
+       return 1;
 }
 
 /*
@@ -394,53 +361,46 @@ xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
  * slots in the quotaoff item have been filled.
  */
 STATIC void
-xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t  *qf,
-                          xfs_log_iovec_t      *log_vector)
+xfs_qm_qoff_logitem_format(
+       struct xfs_log_item     *lip,
+       struct xfs_log_iovec    *log_vector)
 {
-       ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);
+       struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
+
+       ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
 
-       log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
+       log_vector->i_addr = &qflip->qql_format;
        log_vector->i_len = sizeof(xfs_qoff_logitem_t);
        log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
-       qf->qql_format.qf_size = 1;
+       qflip->qql_format.qf_size = 1;
 }
 
-
 /*
  * Pinning has no meaning for a quotaoff item, so just return.
  */
-/*ARGSUSED*/
 STATIC void
-xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_pin(
+       struct xfs_log_item     *lip)
 {
-       return;
 }
 
-
 /*
  * Since pinning has no meaning for a quotaoff item, unpinning does
  * not either.
  */
-/*ARGSUSED*/
 STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_unpin(
+       struct xfs_log_item     *lip,
+       int                     remove)
 {
-       return;
-}
-
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
-{
-       return;
 }
 
 /*
  * Quotaoff items have no locking, so just return success.
  */
-/*ARGSUSED*/
 STATIC uint
-xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_trylock(
+       struct xfs_log_item     *lip)
 {
        return XFS_ITEM_LOCKED;
 }
@@ -449,53 +409,51 @@ xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
  * Quotaoff items have no locking or pushing, so return failure
  * so that the caller doesn't bother with us.
  */
-/*ARGSUSED*/
 STATIC void
-xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_unlock(
+       struct xfs_log_item     *lip)
 {
-       return;
 }
 
 /*
  * The quotaoff-start-item is logged only once and cannot be moved in the log,
  * so simply return the lsn at which it's been logged.
  */
-/*ARGSUSED*/
 STATIC xfs_lsn_t
-xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
+xfs_qm_qoff_logitem_committed(
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               lsn)
 {
-       return (lsn);
+       return lsn;
 }
 
 /*
  * There isn't much you can do to push on a quotaoff item.  It is simply
  * stuck waiting for the log to be flushed to disk.
  */
-/*ARGSUSED*/
 STATIC void
-xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_push(
+       struct xfs_log_item     *lip)
 {
-       return;
 }
 
 
-/*ARGSUSED*/
 STATIC xfs_lsn_t
 xfs_qm_qoffend_logitem_committed(
-       xfs_qoff_logitem_t *qfe,
-       xfs_lsn_t lsn)
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               lsn)
 {
-       xfs_qoff_logitem_t      *qfs;
-       struct xfs_ail          *ailp;
+       struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
+       struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
+       struct xfs_ail          *ailp = qfs->qql_item.li_ailp;
 
-       qfs = qfe->qql_start_lip;
-       ailp = qfs->qql_item.li_ailp;
-       spin_lock(&ailp->xa_lock);
        /*
         * Delete the qoff-start logitem from the AIL.
         * xfs_trans_ail_delete() drops the AIL lock.
         */
+       spin_lock(&ailp->xa_lock);
        xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
+
        kmem_free(qfs);
        kmem_free(qfe);
        return (xfs_lsn_t)-1;
@@ -515,71 +473,52 @@ xfs_qm_qoffend_logitem_committed(
  * (truly makes the quotaoff irrevocable).  If we do something else,
  * then maybe we don't need two.
  */
-/* ARGSUSED */
-STATIC void
-xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
-{
-       return;
-}
-
-/* ARGSUSED */
 STATIC void
-xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
+xfs_qm_qoff_logitem_committing(
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               commit_lsn)
 {
-       return;
 }
 
 static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
-       .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
-       .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
-                                       xfs_qm_qoff_logitem_format,
-       .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
-       .iop_unpin      = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
-       .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
-                                       xfs_qm_qoff_logitem_unpin_remove,
-       .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
-       .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
-       .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_qoffend_logitem_committed,
-       .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
-       .iop_pushbuf    = NULL,
-       .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_qoffend_logitem_committing
+       .iop_size       = xfs_qm_qoff_logitem_size,
+       .iop_format     = xfs_qm_qoff_logitem_format,
+       .iop_pin        = xfs_qm_qoff_logitem_pin,
+       .iop_unpin      = xfs_qm_qoff_logitem_unpin,
+       .iop_trylock    = xfs_qm_qoff_logitem_trylock,
+       .iop_unlock     = xfs_qm_qoff_logitem_unlock,
+       .iop_committed  = xfs_qm_qoffend_logitem_committed,
+       .iop_push       = xfs_qm_qoff_logitem_push,
+       .iop_committing = xfs_qm_qoff_logitem_committing
 };
 
 /*
  * This is the ops vector shared by all quotaoff-start log items.
  */
 static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
-       .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
-       .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
-                                       xfs_qm_qoff_logitem_format,
-       .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
-       .iop_unpin      = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
-       .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
-                                       xfs_qm_qoff_logitem_unpin_remove,
-       .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
-       .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
-       .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_qoff_logitem_committed,
-       .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
-       .iop_pushbuf    = NULL,
-       .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_qm_qoff_logitem_committing
+       .iop_size       = xfs_qm_qoff_logitem_size,
+       .iop_format     = xfs_qm_qoff_logitem_format,
+       .iop_pin        = xfs_qm_qoff_logitem_pin,
+       .iop_unpin      = xfs_qm_qoff_logitem_unpin,
+       .iop_trylock    = xfs_qm_qoff_logitem_trylock,
+       .iop_unlock     = xfs_qm_qoff_logitem_unlock,
+       .iop_committed  = xfs_qm_qoff_logitem_committed,
+       .iop_push       = xfs_qm_qoff_logitem_push,
+       .iop_committing = xfs_qm_qoff_logitem_committing
 };
 
 /*
  * Allocate and initialize a quotaoff item of the correct quota type(s).
  */
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
 xfs_qm_qoff_logitem_init(
-       struct xfs_mount *mp,
-       xfs_qoff_logitem_t *start,
-       uint flags)
+       struct xfs_mount        *mp,
+       struct xfs_qoff_logitem *start,
+       uint                    flags)
 {
-       xfs_qoff_logitem_t      *qf;
+       struct xfs_qoff_logitem *qf;
 
-       qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
+       qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
 
        xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
                        &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
@@ -587,5 +526,5 @@ xfs_qm_qoff_logitem_init(
        qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
        qf->qql_format.qf_flags = flags;
        qf->qql_start_lip = start;
-       return (qf);
+       return qf;
 }
index 67c018392d62a8ecef3c93f35c2b942b0b52137d..9a92407109a19921b2a0914711be6ce70b1b1f12 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
@@ -1497,7 +1490,7 @@ xfs_qm_dqiterate(
                                  maxlblkcnt - lblkno,
                                  XFS_BMAPI_METADATA,
                                  NULL,
-                                 0, map, &nmaps, NULL, NULL);
+                                 0, map, &nmaps, NULL);
                xfs_iunlock(qip, XFS_ILOCK_SHARED);
                if (error)
                        break;
@@ -1669,7 +1662,8 @@ xfs_qm_dqusage_adjust(
         * making us disable quotas for the file system.
         */
        if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
-               xfs_iput(ip, XFS_ILOCK_EXCL);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               IRELE(ip);
                *res = BULKSTAT_RV_GIVEUP;
                return error;
        }
@@ -1682,7 +1676,8 @@ xfs_qm_dqusage_adjust(
                 * Walk thru the extent list and count the realtime blocks.
                 */
                if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
-                       xfs_iput(ip, XFS_ILOCK_EXCL);
+                       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+                       IRELE(ip);
                        if (udqp)
                                xfs_qm_dqput(udqp);
                        if (gdqp)
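The xfs_iput() call sites above are replaced by an explicit xfs_iunlock() followed by IRELE(), which makes the unwind order visible at the call site: drop the lock first, then the reference that keeps the inode alive. A rough, non-XFS illustration of that ordering, with made-up node/process names, could look like this:

#include <pthread.h>
#include <stdbool.h>

struct node {
	pthread_mutex_t	lock;
	int		refcount;
};

static void node_put(struct node *n)
{
	n->refcount--;			/* stands in for IRELE() */
}

static bool node_do_work(struct node *n)
{
	(void)n;
	return false;			/* pretend the work failed */
}

static int process(struct node *n)
{
	pthread_mutex_lock(&n->lock);	/* xfs_ilock() stand-in */

	if (!node_do_work(n)) {
		pthread_mutex_unlock(&n->lock);	/* unlock first ... */
		node_put(n);			/* ... then drop the reference */
		return -1;
	}

	pthread_mutex_unlock(&n->lock);
	node_put(n);
	return 0;
}

int main(void)
{
	struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 1 };

	(void)process(&n);
	return 0;
}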
index 97b410c12794d5ddf52579625d4a256ed384d872..bea02d786c5d30763e3ceee6022fe248a63fb454 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
-#include "xfs_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_qm.h"
index 3d1fc79532e2ab3a2c736c5e799bdd3b39da71dc..8671a0b32644010a04e98f64518c63050add8048 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_bmap.h"
-#include "xfs_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_qm.h"
index b4487764e923b7c493aac813e9161764a0f0ccfb..d257eb8557c43a873644ea6481ec5611b9a0051f 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_bmap.h"
-#include "xfs_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_utils.h"
@@ -248,40 +238,74 @@ out_unlock:
        return error;
 }
 
+STATIC int
+xfs_qm_scall_trunc_qfile(
+       struct xfs_mount        *mp,
+       xfs_ino_t               ino)
+{
+       struct xfs_inode        *ip;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (ino == NULLFSINO)
+               return 0;
+
+       error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
+       if (error)
+               return error;
+
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
+       error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+                                 XFS_TRANS_PERM_LOG_RES,
+                                 XFS_ITRUNCATE_LOG_COUNT);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+               goto out_put;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip);
+
+       error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
+       if (error) {
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+                                    XFS_TRANS_ABORT);
+               goto out_unlock;
+       }
+
+       xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+
+out_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+out_put:
+       IRELE(ip);
+       return error;
+}
+
 int
 xfs_qm_scall_trunc_qfiles(
        xfs_mount_t     *mp,
        uint            flags)
 {
        int             error = 0, error2 = 0;
-       xfs_inode_t     *qip;
 
        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
                qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }
 
-       if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
-               error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip);
-               if (!error) {
-                       error = xfs_truncate_file(mp, qip);
-                       IRELE(qip);
-               }
-       }
-
-       if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
-           mp->m_sb.sb_gquotino != NULLFSINO) {
-               error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip);
-               if (!error2) {
-                       error2 = xfs_truncate_file(mp, qip);
-                       IRELE(qip);
-               }
-       }
+       if (flags & XFS_DQ_USER)
+               error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
+       if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
+               error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
 
        return error ? error : error2;
 }
 
-
 /*
  * Switch on (a given) quota enforcement for a filesystem.  This takes
  * effect immediately.
@@ -875,8 +899,9 @@ xfs_dqrele_inode(
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
-       xfs_iput(ip, XFS_ILOCK_EXCL);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
+       IRELE(ip);
        return 0;
 }
 
@@ -1143,7 +1168,8 @@ xfs_qm_internalqcheck_adjust(
         * of those now.
         */
        if (! ipreleased) {
-               xfs_iput(ip, lock_flags);
+               xfs_iunlock(ip, lock_flags);
+               IRELE(ip);
                ipreleased = B_TRUE;
                goto again;
        }
@@ -1160,7 +1186,8 @@ xfs_qm_internalqcheck_adjust(
                ASSERT(gd);
                xfs_qm_internalqcheck_dqadjust(ip, gd);
        }
-       xfs_iput(ip, lock_flags);
+       xfs_iunlock(ip, lock_flags);
+       IRELE(ip);
        *res = BULKSTAT_RV_DIDONE;
        return (0);
 }
index 061d827da33cd6ea9411c99665ab74d309ebfde9..7de91d1b75c06c91daea768da891ab49a4152818 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
-#include "xfs_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
@@ -59,16 +49,14 @@ xfs_trans_dqjoin(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
 {
-       xfs_dq_logitem_t    *lp = &dqp->q_logitem;
-
        ASSERT(dqp->q_transp != tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       ASSERT(lp->qli_dquot == dqp);
+       ASSERT(dqp->q_logitem.qli_dquot == dqp);
 
        /*
         * Get a log_item_desc to point at the new item.
         */
-       (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
+       xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
 
        /*
         * Initialize i_transp so we can later determine if this dquot is
@@ -93,16 +81,11 @@ xfs_trans_log_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
 {
-       xfs_log_item_desc_t     *lidp;
-
        ASSERT(dqp->q_transp == tp);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
-       lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
-       ASSERT(lidp != NULL);
-
        tp->t_flags |= XFS_TRANS_DIRTY;
-       lidp->lid_flags |= XFS_LID_DIRTY;
+       dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 }
 
 /*
@@ -874,9 +857,8 @@ xfs_trans_get_qoff_item(
        /*
         * Get a log_item_desc to point at the new item.
         */
-       (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q);
-
-       return (q);
+       xfs_trans_add_item(tp, &q->qql_item);
+       return q;
 }
 
 
@@ -890,13 +872,8 @@ xfs_trans_log_quotaoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *qlp)
 {
-       xfs_log_item_desc_t     *lidp;
-
-       lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
-       ASSERT(lidp != NULL);
-
        tp->t_flags |= XFS_TRANS_DIRTY;
-       lidp->lid_flags |= XFS_LID_DIRTY;
+       qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 }
 
 STATIC void
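The removal of xfs_trans_find_item() above works because xfs_trans_add_item() now leaves the log item descriptor reachable from the item itself, so dirtying an item is a direct li_desc dereference rather than a search of the transaction's item list. A small illustrative C program of that idea, with invented types, is shown below; it is not the kernel implementation.

#include <assert.h>
#include <stdlib.h>

#define	DESC_DIRTY	0x1

struct item_desc {
	unsigned int	lid_flags;
};

struct log_item {
	struct item_desc *li_desc;	/* set when the item joins a transaction */
};

static void trans_add_item(struct log_item *lip)
{
	lip->li_desc = calloc(1, sizeof(*lip->li_desc));
}

static void trans_log_item(struct log_item *lip)
{
	/* No list walk needed: the descriptor is reachable from the item. */
	lip->li_desc->lid_flags |= DESC_DIRTY;
}

int main(void)
{
	struct log_item	li = { NULL };

	trans_add_item(&li);
	assert(li.li_desc != NULL);
	trans_log_item(&li);
	assert(li.li_desc->lid_flags & DESC_DIRTY);
	free(li.li_desc);
	return 0;
}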
index 3f3610a7ee059210eea3beadbffe13f87609dce9..975aa10e1a47a3536499b035947ed0c5ac5098a5 100644 (file)
@@ -22,7 +22,6 @@
 #include "xfs_sb.h"
 #include "xfs_inum.h"
 #include "xfs_ag.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_error.h"
 
index a7fbe8a99b12bf2c360ea19372a380ae5d27c855..af168faccc7a5cb8edfacf2e0e0a350028d449fe 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
@@ -688,8 +683,6 @@ xfs_alloc_ag_vextent_near(
        xfs_agblock_t   ltbno;          /* start bno of left side entry */
        xfs_agblock_t   ltbnoa;         /* aligned ... */
        xfs_extlen_t    ltdiff;         /* difference to left side entry */
-       /*REFERENCED*/
-       xfs_agblock_t   ltend;          /* end bno of left side entry */
        xfs_extlen_t    ltlen;          /* length of left side entry */
        xfs_extlen_t    ltlena;         /* aligned ... */
        xfs_agblock_t   ltnew;          /* useful start bno of left side */
@@ -814,8 +807,7 @@ xfs_alloc_ag_vextent_near(
                if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-               ltend = ltbno + ltlen;
-               ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+               ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
                args->len = blen;
                if (!xfs_alloc_fix_minleft(args)) {
                        xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -828,7 +820,7 @@ xfs_alloc_ag_vextent_near(
                 */
                args->agbno = bnew;
                ASSERT(bnew >= ltbno);
-               ASSERT(bnew + blen <= ltend);
+               ASSERT(bnew + blen <= ltbno + ltlen);
                /*
                 * Set up a cursor for the by-bno tree.
                 */
@@ -1157,7 +1149,6 @@ xfs_alloc_ag_vextent_near(
        /*
         * Fix up the length and compute the useful address.
         */
-       ltend = ltbno + ltlen;
        args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
        xfs_alloc_fix_len(args);
        if (!xfs_alloc_fix_minleft(args)) {
@@ -1170,7 +1161,7 @@ xfs_alloc_ag_vextent_near(
        (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
                ltlen, &ltnew);
        ASSERT(ltnew >= ltbno);
-       ASSERT(ltnew + rlen <= ltend);
+       ASSERT(ltnew + rlen <= ltbno + ltlen);
        ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
        args->agbno = ltnew;
        if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
index 6d05199b667cea3c72576750924a8ee8a63effcc..895009a97271fbbd27fcb63b97b9546f375a8778 100644 (file)
@@ -27,16 +27,16 @@ struct xfs_busy_extent;
 /*
  * Freespace allocation types.  Argument to xfs_alloc_[v]extent.
  */
-typedef enum xfs_alloctype
-{
-       XFS_ALLOCTYPE_ANY_AG,           /* allocate anywhere, use rotor */
-       XFS_ALLOCTYPE_FIRST_AG,         /* ... start at ag 0 */
-       XFS_ALLOCTYPE_START_AG,         /* anywhere, start in this a.g. */
-       XFS_ALLOCTYPE_THIS_AG,          /* anywhere in this a.g. */
-       XFS_ALLOCTYPE_START_BNO,        /* near this block else anywhere */
-       XFS_ALLOCTYPE_NEAR_BNO,         /* in this a.g. and near this block */
-       XFS_ALLOCTYPE_THIS_BNO          /* at exactly this block */
-} xfs_alloctype_t;
+#define XFS_ALLOCTYPE_ANY_AG   0x01    /* allocate anywhere, use rotor */
+#define XFS_ALLOCTYPE_FIRST_AG 0x02    /* ... start at ag 0 */
+#define XFS_ALLOCTYPE_START_AG 0x04    /* anywhere, start in this a.g. */
+#define XFS_ALLOCTYPE_THIS_AG  0x08    /* anywhere in this a.g. */
+#define XFS_ALLOCTYPE_START_BNO        0x10    /* near this block else anywhere */
+#define XFS_ALLOCTYPE_NEAR_BNO 0x20    /* in this a.g. and near this block */
+#define XFS_ALLOCTYPE_THIS_BNO 0x40    /* at exactly this block */
+
+/* this should become an enum again when the tracing code is fixed */
+typedef unsigned int xfs_alloctype_t;
 
 #define XFS_ALLOC_TYPES \
        { XFS_ALLOCTYPE_ANY_AG,         "ANY_AG" }, \
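The allocation-type enum is flattened into #defines above, with a note that it should become an enum again once the tracing code is fixed; the XFS_ALLOC_TYPES list that follows pairs each value with a printable name for tracing. As a loose userspace analogue of consuming such a value/name table, one might write the lookup below; the helper and the MY_ALLOCTYPE_* names are invented for the example and are not XFS or ftrace interfaces.

#include <stdio.h>

#define MY_ALLOCTYPE_ANY_AG	0x01
#define MY_ALLOCTYPE_THIS_BNO	0x40

static const struct {
	unsigned int	value;
	const char	*name;
} alloc_type_names[] = {
	{ MY_ALLOCTYPE_ANY_AG,	 "ANY_AG" },
	{ MY_ALLOCTYPE_THIS_BNO, "THIS_BNO" },
};

static const char *alloc_type_name(unsigned int type)
{
	for (size_t i = 0; i < sizeof(alloc_type_names) / sizeof(alloc_type_names[0]); i++)
		if (alloc_type_names[i].value == type)
			return alloc_type_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", alloc_type_name(MY_ALLOCTYPE_THIS_BNO));
	return 0;
}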
index 83f4942187594ac4b998712dc66b2385b083963e..97f7328967fdfe8148b8e1e2479feaa98499859b 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
index b9c196a53c42e219a6cca9113d95be704ecc8f9e..c2568242a9015eaf0bd571104bfc2c468fc2e6bb 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
@@ -325,8 +319,7 @@ xfs_attr_set_int(
                return (error);
        }
 
-       xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
-       xfs_trans_ihold(args.trans, dp);
+       xfs_trans_ijoin(args.trans, dp);
 
        /*
         * If the attribute list is non-existent or a shortform list,
@@ -396,10 +389,8 @@ xfs_attr_set_int(
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args.trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args.trans, dp);
 
                /*
                 * Commit the leaf transformation.  We'll need another (linked)
@@ -544,8 +535,7 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
         * No need to make quota reservations here. We expect to release some
         * blocks not allocate in the common case.
         */
-       xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
-       xfs_trans_ihold(args.trans, dp);
+       xfs_trans_ijoin(args.trans, dp);
 
        /*
         * Decide on what work routines to call based on the inode size.
@@ -821,8 +811,7 @@ xfs_attr_inactive(xfs_inode_t *dp)
         * No need to make quota reservations here. We expect to release some
         * blocks, not allocate, in the common case.
         */
-       xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
-       xfs_trans_ihold(trans, dp);
+       xfs_trans_ijoin(trans, dp);
 
        /*
         * Decide on what work routines to call based on the inode size.
@@ -981,10 +970,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, dp);
 
                /*
                 * Commit the current trans (including the inode) and start
@@ -1085,10 +1072,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
                         * and started a new one.  We need the inode to be
                         * in all transactions.
                         */
-                       if (committed) {
-                               xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                               xfs_trans_ihold(args->trans, dp);
-                       }
+                       if (committed)
+                               xfs_trans_ijoin(args->trans, dp);
                } else
                        xfs_da_buf_done(bp);
 
@@ -1161,10 +1146,8 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, dp);
        } else
                xfs_da_buf_done(bp);
        return(0);
@@ -1317,10 +1300,8 @@ restart:
                         * and started a new one.  We need the inode to be
                         * in all transactions.
                         */
-                       if (committed) {
-                               xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                               xfs_trans_ihold(args->trans, dp);
-                       }
+                       if (committed)
+                               xfs_trans_ijoin(args->trans, dp);
 
                        /*
                         * Commit the node conversion and start the next
@@ -1356,10 +1337,8 @@ restart:
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, dp);
        } else {
                /*
                 * Addition succeeded, update Btree hashvals.
@@ -1470,10 +1449,8 @@ restart:
                         * and started a new one.  We need the inode to be
                         * in all transactions.
                         */
-                       if (committed) {
-                               xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                               xfs_trans_ihold(args->trans, dp);
-                       }
+                       if (committed)
+                               xfs_trans_ijoin(args->trans, dp);
                }
 
                /*
@@ -1604,10 +1581,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, dp);
 
                /*
                 * Commit the Btree join operation and start a new trans.
@@ -1658,10 +1633,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
                         * and started a new one.  We need the inode to be
                         * in all transactions.
                         */
-                       if (committed) {
-                               xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                               xfs_trans_ihold(args->trans, dp);
-                       }
+                       if (committed)
+                               xfs_trans_ijoin(args->trans, dp);
                } else
                        xfs_da_brelse(args->trans, bp);
        }
@@ -2004,7 +1977,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
                error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
                                  args->rmtblkcnt,
                                  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-                                 NULL, 0, map, &nmap, NULL, NULL);
+                                 NULL, 0, map, &nmap, NULL);
                if (error)
                        return(error);
                ASSERT(nmap >= 1);
@@ -2083,7 +2056,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
                                  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
                                                        XFS_BMAPI_WRITE,
                                  args->firstblock, args->total, &map, &nmap,
-                                 args->flist, NULL);
+                                 args->flist);
                if (!error) {
                        error = xfs_bmap_finish(&args->trans, args->flist,
                                                &committed);
@@ -2099,10 +2072,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, dp);
 
                ASSERT(nmap == 1);
                ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -2136,7 +2107,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
                                  args->rmtblkcnt,
                                  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
                                  args->firstblock, 0, &map, &nmap,
-                                 NULL, NULL);
+                                 NULL);
                if (error) {
                        return(error);
                }
@@ -2201,7 +2172,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
                                        args->rmtblkcnt,
                                        XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
                                        args->firstblock, 0, &map, &nmap,
-                                       args->flist, NULL);
+                                       args->flist);
                if (error) {
                        return(error);
                }
@@ -2239,7 +2210,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
                error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
                                    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
                                    1, args->firstblock, args->flist,
-                                   NULL, &done);
+                                   &done);
                if (!error) {
                        error = xfs_bmap_finish(&args->trans, args->flist,
                                                &committed);
@@ -2255,10 +2226,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
                 * bmap_finish() may have committed the last trans and started
                 * a new one.  We need the inode to be in all transactions.
                 */
-               if (committed) {
-                       xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
-                       xfs_trans_ihold(args->trans, args->dp);
-               }
+               if (committed)
+                       xfs_trans_ijoin(args->trans, args->dp);
 
                /*
                 * Close out trans and start the next one in the chain.
index a90ce74fc256904837381f61e49c0e569818e89f..a6cff8edcdb6a33cfa6b5554c06be24439801528 100644 (file)
@@ -24,8 +24,6 @@
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
@@ -33,7 +31,6 @@
 #include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
-#include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
@@ -2931,7 +2928,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
                nmap = 1;
                error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
                                        XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-                                       NULL, 0, &map, &nmap, NULL, NULL);
+                                       NULL, 0, &map, &nmap, NULL);
                if (error) {
                        return(error);
                }
index 99587ded043f84b580f0d3331df408a0aa0960fe..23f14e595c18cd3a8068796f5b73396fa9db98f9 100644 (file)
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_dir2_data.h"
 #include "xfs_dir2_leaf.h"
@@ -104,7 +101,6 @@ xfs_bmap_add_extent(
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork, /* data or attr fork */
        int                     rsvd);  /* OK to allocate reserved blocks */
 
@@ -122,7 +118,6 @@ xfs_bmap_add_extent_delay_real(
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     rsvd);  /* OK to allocate reserved blocks */
 
 /*
@@ -135,7 +130,6 @@ xfs_bmap_add_extent_hole_delay(
        xfs_extnum_t            idx,    /* extent number to update/insert */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp,/* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     rsvd);  /* OK to allocate reserved blocks */
 
 /*
@@ -149,7 +143,6 @@ xfs_bmap_add_extent_hole_real(
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork); /* data or attr fork */
 
 /*
@@ -162,8 +155,7 @@ xfs_bmap_add_extent_unwritten_real(
        xfs_extnum_t            idx,    /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta); /* Change made to incore extents */
+       int                     *logflagsp); /* inode logging flags */
 
 /*
  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
@@ -200,7 +192,6 @@ xfs_bmap_del_extent(
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp,/* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork, /* data or attr fork */
        int                     rsvd);   /* OK to allocate reserved blocks */
 
@@ -489,7 +480,6 @@ xfs_bmap_add_extent(
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork, /* data or attr fork */
        int                     rsvd)   /* OK to use reserved data blocks */
 {
@@ -524,15 +514,6 @@ xfs_bmap_add_extent(
                        logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
                } else
                        logflags = 0;
-               /* DELTA: single new extent */
-               if (delta) {
-                       if (delta->xed_startoff > new->br_startoff)
-                               delta->xed_startoff = new->br_startoff;
-                       if (delta->xed_blockcount <
-                                       new->br_startoff + new->br_blockcount)
-                               delta->xed_blockcount = new->br_startoff +
-                                               new->br_blockcount;
-               }
        }
        /*
         * Any kind of new delayed allocation goes here.
@@ -542,7 +523,7 @@ xfs_bmap_add_extent(
                        ASSERT((cur->bc_private.b.flags &
                                XFS_BTCUR_BPRV_WASDEL) == 0);
                if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
-                               &logflags, delta, rsvd)))
+                               &logflags, rsvd)))
                        goto done;
        }
        /*
@@ -553,7 +534,7 @@ xfs_bmap_add_extent(
                        ASSERT((cur->bc_private.b.flags &
                                XFS_BTCUR_BPRV_WASDEL) == 0);
                if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
-                               &logflags, delta, whichfork)))
+                               &logflags, whichfork)))
                        goto done;
        } else {
                xfs_bmbt_irec_t prev;   /* old extent at offset idx */
@@ -578,17 +559,17 @@ xfs_bmap_add_extent(
                                                XFS_BTCUR_BPRV_WASDEL);
                                if ((error = xfs_bmap_add_extent_delay_real(ip,
                                        idx, &cur, new, &da_new, first, flist,
-                                       &logflags, delta, rsvd)))
+                                       &logflags, rsvd)))
                                        goto done;
                        } else if (new->br_state == XFS_EXT_NORM) {
                                ASSERT(new->br_state == XFS_EXT_NORM);
                                if ((error = xfs_bmap_add_extent_unwritten_real(
-                                       ip, idx, &cur, new, &logflags, delta)))
+                                       ip, idx, &cur, new, &logflags)))
                                        goto done;
                        } else {
                                ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
                                if ((error = xfs_bmap_add_extent_unwritten_real(
-                                       ip, idx, &cur, new, &logflags, delta)))
+                                       ip, idx, &cur, new, &logflags)))
                                        goto done;
                        }
                        ASSERT(*curp == cur || *curp == NULL);
@@ -601,7 +582,7 @@ xfs_bmap_add_extent(
                                ASSERT((cur->bc_private.b.flags &
                                        XFS_BTCUR_BPRV_WASDEL) == 0);
                        if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
-                                       new, &logflags, delta, whichfork)))
+                                       new, &logflags, whichfork)))
                                goto done;
                }
        }
@@ -666,7 +647,6 @@ xfs_bmap_add_extent_delay_real(
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     rsvd)   /* OK to use reserved data block allocation */
 {
        xfs_btree_cur_t         *cur;   /* btree cursor */
@@ -797,11 +777,6 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                }
                *dnew = 0;
-               /* DELTA: Three in-core extents are replaced by one. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -832,10 +807,6 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                }
                *dnew = 0;
-               /* DELTA: Two in-core extents are replaced by one. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -867,10 +838,6 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                }
                *dnew = 0;
-               /* DELTA: Two in-core extents are replaced by one. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -900,9 +867,6 @@ xfs_bmap_add_extent_delay_real(
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
                *dnew = 0;
-               /* DELTA: The in-core extent described by new changed type. */
-               temp = new->br_startoff;
-               temp2 = new->br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -942,10 +906,6 @@ xfs_bmap_add_extent_delay_real(
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
                trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
                *dnew = temp;
-               /* DELTA: The boundary between two in-core extents moved. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING:
@@ -990,9 +950,6 @@ xfs_bmap_add_extent_delay_real(
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
                trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
                *dnew = temp;
-               /* DELTA: One in-core extent is split in two. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1031,10 +988,6 @@ xfs_bmap_add_extent_delay_real(
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
                trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
                *dnew = temp;
-               /* DELTA: The boundary between two in-core extents moved. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_RIGHT_FILLING:
@@ -1078,9 +1031,6 @@ xfs_bmap_add_extent_delay_real(
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
                trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
                *dnew = temp;
-               /* DELTA: One in-core extent is split in two. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case 0:
@@ -1161,9 +1111,6 @@ xfs_bmap_add_extent_delay_real(
                        nullstartblock((int)temp2));
                trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
                *dnew = temp + temp2;
-               /* DELTA: One in-core extent is split in three. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1179,13 +1126,6 @@ xfs_bmap_add_extent_delay_real(
                ASSERT(0);
        }
        *curp = cur;
-       if (delta) {
-               temp2 += temp;
-               if (delta->xed_startoff > temp)
-                       delta->xed_startoff = temp;
-               if (delta->xed_blockcount < temp2)
-                       delta->xed_blockcount = temp2;
-       }
 done:
        *logflagsp = rval;
        return error;
@@ -1204,8 +1144,7 @@ xfs_bmap_add_extent_unwritten_real(
        xfs_extnum_t            idx,    /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta) /* Change made to incore extents */
+       int                     *logflagsp) /* inode logging flags */
 {
        xfs_btree_cur_t         *cur;   /* btree cursor */
        xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
@@ -1219,8 +1158,6 @@ xfs_bmap_add_extent_unwritten_real(
                                        /* left is 0, right is 1, prev is 2 */
        int                     rval=0; /* return value (logging flags) */
        int                     state = 0;/* state bits, accessed thru macros */
-       xfs_filblks_t           temp=0;
-       xfs_filblks_t           temp2=0;
 
 #define        LEFT            r[0]
 #define        RIGHT           r[1]
@@ -1341,11 +1278,6 @@ xfs_bmap_add_extent_unwritten_real(
                                RIGHT.br_blockcount, LEFT.br_state)))
                                goto done;
                }
-               /* DELTA: Three in-core extents are replaced by one. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -1382,10 +1314,6 @@ xfs_bmap_add_extent_unwritten_real(
                                LEFT.br_state)))
                                goto done;
                }
-               /* DELTA: Two in-core extents are replaced by one. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1422,10 +1350,6 @@ xfs_bmap_add_extent_unwritten_real(
                                newext)))
                                goto done;
                }
-               /* DELTA: Two in-core extents are replaced by one. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -1453,9 +1377,6 @@ xfs_bmap_add_extent_unwritten_real(
                                newext)))
                                goto done;
                }
-               /* DELTA: The in-core extent described by new changed type. */
-               temp = new->br_startoff;
-               temp2 = new->br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -1501,10 +1422,6 @@ xfs_bmap_add_extent_unwritten_real(
                                LEFT.br_state))
                                goto done;
                }
-               /* DELTA: The boundary between two in-core extents moved. */
-               temp = LEFT.br_startoff;
-               temp2 = LEFT.br_blockcount +
-                       PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING:
@@ -1544,9 +1461,6 @@ xfs_bmap_add_extent_unwritten_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               /* DELTA: One in-core extent is split in two. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1587,10 +1501,6 @@ xfs_bmap_add_extent_unwritten_real(
                                newext)))
                                goto done;
                }
-               /* DELTA: The boundary between two in-core extents moved. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount +
-                       RIGHT.br_blockcount;
                break;
 
        case BMAP_RIGHT_FILLING:
@@ -1630,9 +1540,6 @@ xfs_bmap_add_extent_unwritten_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               /* DELTA: One in-core extent is split in two. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case 0:
@@ -1692,9 +1599,6 @@ xfs_bmap_add_extent_unwritten_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               /* DELTA: One in-core extent is split in three. */
-               temp = PREV.br_startoff;
-               temp2 = PREV.br_blockcount;
                break;
 
        case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1710,13 +1614,6 @@ xfs_bmap_add_extent_unwritten_real(
                ASSERT(0);
        }
        *curp = cur;
-       if (delta) {
-               temp2 += temp;
-               if (delta->xed_startoff > temp)
-                       delta->xed_startoff = temp;
-               if (delta->xed_blockcount < temp2)
-                       delta->xed_blockcount = temp2;
-       }
 done:
        *logflagsp = rval;
        return error;
@@ -1736,7 +1633,6 @@ xfs_bmap_add_extent_hole_delay(
        xfs_extnum_t            idx,    /* extent number to update/insert */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     rsvd)           /* OK to allocate reserved blocks */
 {
        xfs_bmbt_rec_host_t     *ep;    /* extent record for idx */
@@ -1747,7 +1643,6 @@ xfs_bmap_add_extent_hole_delay(
        xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
        int                     state;  /* state bits, accessed thru macros */
        xfs_filblks_t           temp=0; /* temp for indirect calculations */
-       xfs_filblks_t           temp2=0;
 
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        ep = xfs_iext_get_ext(ifp, idx);
@@ -1819,9 +1714,6 @@ xfs_bmap_add_extent_hole_delay(
 
                xfs_iext_remove(ip, idx, 1, state);
                ip->i_df.if_lastex = idx - 1;
-               /* DELTA: Two in-core extents were replaced by one. */
-               temp2 = temp;
-               temp = left.br_startoff;
                break;
 
        case BMAP_LEFT_CONTIG:
@@ -1841,9 +1733,6 @@ xfs_bmap_add_extent_hole_delay(
                trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
 
                ip->i_df.if_lastex = idx - 1;
-               /* DELTA: One in-core extent grew into a hole. */
-               temp2 = temp;
-               temp = left.br_startoff;
                break;
 
        case BMAP_RIGHT_CONTIG:
@@ -1862,9 +1751,6 @@ xfs_bmap_add_extent_hole_delay(
                trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
 
                ip->i_df.if_lastex = idx;
-               /* DELTA: One in-core extent grew into a hole. */
-               temp2 = temp;
-               temp = new->br_startoff;
                break;
 
        case 0:
@@ -1876,9 +1762,6 @@ xfs_bmap_add_extent_hole_delay(
                oldlen = newlen = 0;
                xfs_iext_insert(ip, idx, 1, new, state);
                ip->i_df.if_lastex = idx;
-               /* DELTA: A new in-core extent was added in a hole. */
-               temp2 = new->br_blockcount;
-               temp = new->br_startoff;
                break;
        }
        if (oldlen != newlen) {
@@ -1889,13 +1772,6 @@ xfs_bmap_add_extent_hole_delay(
                 * Nothing to do for disk quota accounting here.
                 */
        }
-       if (delta) {
-               temp2 += temp;
-               if (delta->xed_startoff > temp)
-                       delta->xed_startoff = temp;
-               if (delta->xed_blockcount < temp2)
-                       delta->xed_blockcount = temp2;
-       }
        *logflagsp = 0;
        return 0;
 }
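
Aside, not part of the commit: every deleted "if (delta)" block in these functions, like the one just above, performed the same bookkeeping, widening a single (startoff, end) window so that it covered all in-core extent changes, with xed_blockcount holding an end offset until xfs_bmapi()/xfs_bunmapi() converted it to a length at the very end. A small stand-alone sketch of that accumulation, using hypothetical names (extdelta, delta_accumulate) rather than the removed XFS types:

#include <stdio.h>

/* Stand-ins for xfs_fileoff_t and the removed xfs_extdelta_t (sketch only). */
typedef long long fileoff_t;
#define NULLFILEOFF (-1LL)

struct extdelta {
        fileoff_t startoff;     /* lowest offset touched */
        fileoff_t blockcount;   /* highest end offset; converted to a length last */
};

/* Widen the window so it also covers a change at [off, off + count). */
static void delta_accumulate(struct extdelta *d, fileoff_t off, fileoff_t count)
{
        fileoff_t end = off + count;

        if (d->startoff == NULLFILEOFF || d->startoff > off)
                d->startoff = off;
        if (d->blockcount < end)
                d->blockcount = end;
}

int main(void)
{
        struct extdelta d = { NULLFILEOFF, 0 };

        delta_accumulate(&d, 10, 5);    /* change covers [10, 15) */
        delta_accumulate(&d, 30, 2);    /* change covers [30, 32) */

        /* Final step done by the callers: turn the end offset into a length. */
        if (d.startoff != NULLFILEOFF)
                d.blockcount -= d.startoff;

        printf("startoff=%lld blockcount=%lld\n", d.startoff, d.blockcount);
        return 0;       /* prints startoff=10 blockcount=22 */
}

Since this series removes every consumer of the result, the parameter and the typedef can go away entirely, which is what the hunks in this file do.
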
@@ -1911,7 +1787,6 @@ xfs_bmap_add_extent_hole_real(
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork) /* data or attr fork */
 {
        xfs_bmbt_rec_host_t     *ep;    /* pointer to extent entry ins. point */
@@ -1922,8 +1797,6 @@ xfs_bmap_add_extent_hole_real(
        xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
        int                     rval=0; /* return value (logging flags) */
        int                     state;  /* state bits, accessed thru macros */
-       xfs_filblks_t           temp=0;
-       xfs_filblks_t           temp2=0;
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
@@ -2020,11 +1893,6 @@ xfs_bmap_add_extent_hole_real(
                                        left.br_state)))
                                goto done;
                }
-               /* DELTA: Two in-core extents were replaced by one. */
-               temp = left.br_startoff;
-               temp2 = left.br_blockcount +
-                       new->br_blockcount +
-                       right.br_blockcount;
                break;
 
        case BMAP_LEFT_CONTIG:
@@ -2056,10 +1924,6 @@ xfs_bmap_add_extent_hole_real(
                                        left.br_state)))
                                goto done;
                }
-               /* DELTA: One in-core extent grew. */
-               temp = left.br_startoff;
-               temp2 = left.br_blockcount +
-                       new->br_blockcount;
                break;
 
        case BMAP_RIGHT_CONTIG:
@@ -2092,10 +1956,6 @@ xfs_bmap_add_extent_hole_real(
                                        right.br_state)))
                                goto done;
                }
-               /* DELTA: One in-core extent grew. */
-               temp = new->br_startoff;
-               temp2 = new->br_blockcount +
-                       right.br_blockcount;
                break;
 
        case 0:
@@ -2123,18 +1983,8 @@ xfs_bmap_add_extent_hole_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               /* DELTA: A new extent was added in a hole. */
-               temp = new->br_startoff;
-               temp2 = new->br_blockcount;
                break;
        }
-       if (delta) {
-               temp2 += temp;
-               if (delta->xed_startoff > temp)
-                       delta->xed_startoff = temp;
-               if (delta->xed_blockcount < temp2)
-                       delta->xed_blockcount = temp2;
-       }
 done:
        *logflagsp = rval;
        return error;
@@ -2959,7 +2809,6 @@ xfs_bmap_del_extent(
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *del,   /* data to remove from extents */
        int                     *logflagsp, /* inode logging flags */
-       xfs_extdelta_t          *delta, /* Change made to incore extents */
        int                     whichfork, /* data or attr fork */
        int                     rsvd)   /* OK to allocate reserved blocks */
 {
@@ -3265,14 +3114,6 @@ xfs_bmap_del_extent(
        if (da_old > da_new)
                xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
                        rsvd);
-       if (delta) {
-               /* DELTA: report the original extent. */
-               if (delta->xed_startoff > got.br_startoff)
-                       delta->xed_startoff = got.br_startoff;
-               if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
-                       delta->xed_blockcount = got.br_startoff +
-                                                       got.br_blockcount;
-       }
 done:
        *logflagsp = flags;
        return error;
@@ -3754,9 +3595,10 @@ xfs_bmap_add_attrfork(
                ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        }
        ASSERT(ip->i_d.di_anextents == 0);
-       IHOLD(ip);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_DEV:
                ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@@ -4483,8 +4325,7 @@ xfs_bmapi(
        xfs_extlen_t    total,          /* total blocks needed */
        xfs_bmbt_irec_t *mval,          /* output: map values */
        int             *nmap,          /* i/o: mval size/count */
-       xfs_bmap_free_t *flist,         /* i/o: list extents to free */
-       xfs_extdelta_t  *delta)         /* o: change made to incore extents */
+       xfs_bmap_free_t *flist)         /* i/o: list extents to free */
 {
        xfs_fsblock_t   abno;           /* allocated block number */
        xfs_extlen_t    alen;           /* allocated extent length */
@@ -4596,10 +4437,7 @@ xfs_bmapi(
        end = bno + len;
        obno = bno;
        bma.ip = NULL;
-       if (delta) {
-               delta->xed_startoff = NULLFILEOFF;
-               delta->xed_blockcount = 0;
-       }
+
        while (bno < end && n < *nmap) {
                /*
                 * Reading past eof, act as though there's a hole
@@ -4620,19 +4458,13 @@ xfs_bmapi(
                         * allocate the stuff asked for in this bmap call
                         * but that wouldn't be as good.
                         */
-                       if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
+                       if (wasdelay) {
                                alen = (xfs_extlen_t)got.br_blockcount;
                                aoff = got.br_startoff;
                                if (lastx != NULLEXTNUM && lastx) {
                                        ep = xfs_iext_get_ext(ifp, lastx - 1);
                                        xfs_bmbt_get_all(ep, &prev);
                                }
-                       } else if (wasdelay) {
-                               alen = (xfs_extlen_t)
-                                       XFS_FILBLKS_MIN(len,
-                                               (got.br_startoff +
-                                                got.br_blockcount) - bno);
-                               aoff = bno;
                        } else {
                                alen = (xfs_extlen_t)
                                        XFS_FILBLKS_MIN(len, MAXEXTLEN);
@@ -4831,7 +4663,7 @@ xfs_bmapi(
                                        got.br_state = XFS_EXT_UNWRITTEN;
                        }
                        error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
-                               firstblock, flist, &tmp_logflags, delta,
+                               firstblock, flist, &tmp_logflags,
                                whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
                        logflags |= tmp_logflags;
                        if (error)
@@ -4927,7 +4759,7 @@ xfs_bmapi(
                        }
                        mval->br_state = XFS_EXT_NORM;
                        error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
-                               firstblock, flist, &tmp_logflags, delta,
+                               firstblock, flist, &tmp_logflags,
                                whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
                        logflags |= tmp_logflags;
                        if (error)
@@ -5017,14 +4849,6 @@ xfs_bmapi(
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
               XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
        error = 0;
-       if (delta && delta->xed_startoff != NULLFILEOFF) {
-               /* A change was actually made.
-                * Note that delta->xed_blockount is an offset at this
-                * point and needs to be converted to a block count.
-                */
-               ASSERT(delta->xed_blockcount > delta->xed_startoff);
-               delta->xed_blockcount -= delta->xed_startoff;
-       }
 error0:
        /*
         * Log everything.  Do this after conversion, there's no point in
@@ -5136,8 +4960,6 @@ xfs_bunmapi(
        xfs_fsblock_t           *firstblock,    /* first allocated block
                                                   controls a.g. for allocs */
        xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
-       xfs_extdelta_t          *delta,         /* o: change made to incore
-                                                  extents */
        int                     *done)          /* set if not done yet */
 {
        xfs_btree_cur_t         *cur;           /* bmap btree cursor */
@@ -5196,10 +5018,7 @@ xfs_bunmapi(
        bno = start + len - 1;
        ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
                &prev);
-       if (delta) {
-               delta->xed_startoff = NULLFILEOFF;
-               delta->xed_blockcount = 0;
-       }
+
        /*
         * Check to see if the given block number is past the end of the
         * file, back up to the last block if so...
@@ -5297,7 +5116,7 @@ xfs_bunmapi(
                        }
                        del.br_state = XFS_EXT_UNWRITTEN;
                        error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
-                               firstblock, flist, &logflags, delta,
+                               firstblock, flist, &logflags,
                                XFS_DATA_FORK, 0);
                        if (error)
                                goto error0;
@@ -5352,7 +5171,7 @@ xfs_bunmapi(
                                prev.br_state = XFS_EXT_UNWRITTEN;
                                error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
                                        &prev, firstblock, flist, &logflags,
-                                       delta, XFS_DATA_FORK, 0);
+                                       XFS_DATA_FORK, 0);
                                if (error)
                                        goto error0;
                                goto nodelete;
@@ -5361,7 +5180,7 @@ xfs_bunmapi(
                                del.br_state = XFS_EXT_UNWRITTEN;
                                error = xfs_bmap_add_extent(ip, lastx, &cur,
                                        &del, firstblock, flist, &logflags,
-                                       delta, XFS_DATA_FORK, 0);
+                                       XFS_DATA_FORK, 0);
                                if (error)
                                        goto error0;
                                goto nodelete;
@@ -5414,7 +5233,7 @@ xfs_bunmapi(
                        goto error0;
                }
                error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
-                               &tmp_logflags, delta, whichfork, rsvd);
+                               &tmp_logflags, whichfork, rsvd);
                logflags |= tmp_logflags;
                if (error)
                        goto error0;
@@ -5471,14 +5290,6 @@ nodelete:
        ASSERT(ifp->if_ext_max ==
               XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
        error = 0;
-       if (delta && delta->xed_startoff != NULLFILEOFF) {
-               /* A change was actually made.
-                * Note that delta->xed_blockount is an offset at this
-                * point and needs to be converted to a block count.
-                */
-               ASSERT(delta->xed_blockcount > delta->xed_startoff);
-               delta->xed_blockcount -= delta->xed_startoff;
-       }
 error0:
        /*
         * Log everything.  Do this after conversion, there's no point in
@@ -5605,28 +5416,6 @@ xfs_getbmap(
                prealloced = 0;
                fixlen = 1LL << 32;
        } else {
-               /*
-                * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
-                * not generate a DMAPI read event.  Otherwise, if the
-                * DM_EVENT_READ bit is set for the file, generate a read
-                * event in order that the DMAPI application may do its thing
-                * before we return the extents.  Usually this means restoring
-                * user file data to regions of the file that look like holes.
-                *
-                * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
-                * BMV_IF_NO_DMAPI_READ so that read events are generated.
-                * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
-                * could misinterpret holes in a DMAPI file as true holes,
-                * when in fact they may represent offline user data.
-                */
-               if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
-                   !(iflags & BMV_IF_NO_DMAPI_READ)) {
-                       error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
-                                             0, 0, 0, NULL);
-                       if (error)
-                               return XFS_ERROR(error);
-               }
-
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
@@ -5713,7 +5502,7 @@ xfs_getbmap(
                error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                  XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                  bmapi_flags, NULL, 0, map, &nmap,
-                                 NULL, NULL);
+                                 NULL);
                if (error)
                        goto out_free_map;
                ASSERT(nmap <= subnex);
@@ -5859,66 +5648,34 @@ xfs_bmap_eof(
 }
 
 #ifdef DEBUG
-STATIC
-xfs_buf_t *
+STATIC struct xfs_buf *
 xfs_bmap_get_bp(
-       xfs_btree_cur_t         *cur,
+       struct xfs_btree_cur    *cur,
        xfs_fsblock_t           bno)
 {
-       int i;
-       xfs_buf_t *bp;
+       struct xfs_log_item_desc *lidp;
+       int                     i;
 
        if (!cur)
-               return(NULL);
-
-       bp = NULL;
-       for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
-               bp = cur->bc_bufs[i];
-               if (!bp) break;
-               if (XFS_BUF_ADDR(bp) == bno)
-                       break;  /* Found it */
-       }
-       if (i == XFS_BTREE_MAXLEVELS)
-               bp = NULL;
-
-       if (!bp) { /* Chase down all the log items to see if the bp is there */
-               xfs_log_item_chunk_t    *licp;
-               xfs_trans_t             *tp;
-
-               tp = cur->bc_tp;
-               licp = &tp->t_items;
-               while (!bp && licp != NULL) {
-                       if (xfs_lic_are_all_free(licp)) {
-                               licp = licp->lic_next;
-                               continue;
-                       }
-                       for (i = 0; i < licp->lic_unused; i++) {
-                               xfs_log_item_desc_t     *lidp;
-                               xfs_log_item_t          *lip;
-                               xfs_buf_log_item_t      *bip;
-                               xfs_buf_t               *lbp;
-
-                               if (xfs_lic_isfree(licp, i)) {
-                                       continue;
-                               }
-
-                               lidp = xfs_lic_slot(licp, i);
-                               lip = lidp->lid_item;
-                               if (lip->li_type != XFS_LI_BUF)
-                                       continue;
+               return NULL;
 
-                               bip = (xfs_buf_log_item_t *)lip;
-                               lbp = bip->bli_buf;
+       for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
+               if (!cur->bc_bufs[i])
+                       break;
+               if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
+                       return cur->bc_bufs[i];
+       }
 
-                               if (XFS_BUF_ADDR(lbp) == bno) {
-                                       bp = lbp;
-                                       break; /* Found it */
-                               }
-                       }
-                       licp = licp->lic_next;
-               }
+       /* Chase down all the log items to see if the bp is there */
+       list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
+               struct xfs_buf_log_item *bip;
+               bip = (struct xfs_buf_log_item *)lidp->lid_item;
+               if (bip->bli_item.li_type == XFS_LI_BUF &&
+                   XFS_BUF_ADDR(bip->bli_buf) == bno)
+                       return bip->bli_buf;
        }
-       return(bp);
+
+       return NULL;
 }
 
 STATIC void
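
For context, not part of the patch: the rewritten xfs_bmap_get_bp() above does a two-stage lookup, first scanning the cursor's own buffer array and then walking the transaction's t_items list of log item descriptors, instead of chasing the old xfs_log_item_chunk_t chains. A minimal user-space sketch of that shape, with made-up stand-in types (buf, buf_item, txn, cursor) rather than the real XFS structures:

/* Illustrative sketch only: stand-in types, not the real XFS structures. */
#include <stddef.h>
#include <stdio.h>

#define MAXLEVELS 4

struct buf      { long blkno; };
struct buf_item { struct buf *b; struct buf_item *next; };
struct txn      { struct buf_item *items; };    /* plain singly linked list */
struct cursor   { struct buf *bufs[MAXLEVELS]; struct txn *tp; };

/* Two-stage lookup: the cursor's own buffers first, then the transaction's items. */
static struct buf *get_bp(struct cursor *cur, long bno)
{
        struct buf_item *bip;
        int i;

        if (!cur)
                return NULL;

        for (i = 0; i < MAXLEVELS; i++) {
                if (!cur->bufs[i])
                        break;
                if (cur->bufs[i]->blkno == bno)
                        return cur->bufs[i];
        }

        /* chase down the transaction's items to see if the buffer is there */
        for (bip = cur->tp->items; bip; bip = bip->next)
                if (bip->b->blkno == bno)
                        return bip->b;

        return NULL;
}

int main(void)
{
        struct buf b1 = { 42 }, b2 = { 99 };
        struct buf_item it = { &b2, NULL };
        struct txn tp = { &it };
        struct cursor cur = { { &b1 }, &tp };

        printf("%ld %ld\n", get_bp(&cur, 42)->blkno, get_bp(&cur, 99)->blkno);
        return 0;
}

The real code additionally checks that a descriptor's li_type is XFS_LI_BUF before treating it as a buffer log item; the sketch skips that step because its list only ever holds buffer items.
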
index 419dafb9d87d4a2a79b440c30a54e672a571b55f..b13569a6179b7235f315fe87f624e42336310c9e 100644 (file)
@@ -27,20 +27,6 @@ struct xfs_trans;
 
 extern kmem_zone_t     *xfs_bmap_free_item_zone;
 
-/*
- * DELTA: describe a change to the in-core extent list.
- *
- * Internally the use of xed_blockount is somewhat funky.
- * xed_blockcount contains an offset much of the time because this
- * makes merging changes easier.  (xfs_fileoff_t and xfs_filblks_t are
- * the same underlying type).
- */
-typedef struct xfs_extdelta
-{
-       xfs_fileoff_t           xed_startoff;   /* offset of range */
-       xfs_filblks_t           xed_blockcount; /* blocks in range */
-} xfs_extdelta_t;
-
 /*
  * List of extents to be free "later".
  * The list is kept sorted on xbf_startblock.
@@ -82,16 +68,13 @@ typedef     struct xfs_bmap_free
 #define XFS_BMAPI_DELAY                0x002   /* delayed write operation */
 #define XFS_BMAPI_ENTIRE       0x004   /* return entire extent, not trimmed */
 #define XFS_BMAPI_METADATA     0x008   /* mapping metadata not user data */
-#define XFS_BMAPI_EXACT                0x010   /* allocate only to spec'd bounds */
-#define XFS_BMAPI_ATTRFORK     0x020   /* use attribute fork not data */
-#define XFS_BMAPI_ASYNC                0x040   /* bunmapi xactions can be async */
-#define XFS_BMAPI_RSVBLOCKS    0x080   /* OK to alloc. reserved data blocks */
-#define        XFS_BMAPI_PREALLOC      0x100   /* preallocation op: unwritten space */
-#define        XFS_BMAPI_IGSTATE       0x200   /* Ignore state - */
+#define XFS_BMAPI_ATTRFORK     0x010   /* use attribute fork not data */
+#define XFS_BMAPI_RSVBLOCKS    0x020   /* OK to alloc. reserved data blocks */
+#define        XFS_BMAPI_PREALLOC      0x040   /* preallocation op: unwritten space */
+#define        XFS_BMAPI_IGSTATE       0x080   /* Ignore state - */
                                        /* combine contig. space */
-#define        XFS_BMAPI_CONTIG        0x400   /* must allocate only one extent */
-/*     XFS_BMAPI_DIRECT_IO     0x800   */
-#define XFS_BMAPI_CONVERT      0x1000  /* unwritten extent conversion - */
+#define        XFS_BMAPI_CONTIG        0x100   /* must allocate only one extent */
+#define XFS_BMAPI_CONVERT      0x200   /* unwritten extent conversion - */
                                        /* need write cache flushing and no */
                                        /* additional allocation alignments */
 
@@ -100,9 +83,7 @@ typedef      struct xfs_bmap_free
        { XFS_BMAPI_DELAY,      "DELAY" }, \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
        { XFS_BMAPI_METADATA,   "METADATA" }, \
-       { XFS_BMAPI_EXACT,      "EXACT" }, \
        { XFS_BMAPI_ATTRFORK,   "ATTRFORK" }, \
-       { XFS_BMAPI_ASYNC,      "ASYNC" }, \
        { XFS_BMAPI_RSVBLOCKS,  "RSVBLOCKS" }, \
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
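
Observation, not something the patch adds: with XFS_BMAPI_EXACT and XFS_BMAPI_ASYNC gone, the remaining single-bit flags are repacked into lower values, and the XFS_BMAPI_FLAGS name table has to be trimmed in lockstep so that flag words still decode to the right names. A hedged sketch of how such a { value, "NAME" } table is consumed, using only the values visible in the hunks above (flag_name and print_flags are made-up illustration names):

#include <stdio.h>

/* Mirrors the new flag values shown above; illustration only. */
struct flag_name { unsigned bit; const char *name; };

static const struct flag_name bmapi_flags[] = {
        { 0x002, "DELAY" },
        { 0x004, "ENTIRE" },
        { 0x008, "METADATA" },
        { 0x010, "ATTRFORK" },
        { 0x020, "RSVBLOCKS" },
        { 0x040, "PREALLOC" },
        { 0x080, "IGSTATE" },
        { 0x100, "CONTIG" },
        { 0x200, "CONVERT" },
};

/* Decode a flags word into names, the way a trace consumer would. */
static void print_flags(unsigned flags)
{
        unsigned i;

        for (i = 0; i < sizeof(bmapi_flags) / sizeof(bmapi_flags[0]); i++)
                if (flags & bmapi_flags[i].bit)
                        printf("%s ", bmapi_flags[i].name);
        printf("\n");
}

int main(void)
{
        print_flags(0x002 | 0x008 | 0x100);     /* DELAY METADATA CONTIG */
        return 0;
}
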
@@ -310,9 +291,7 @@ xfs_bmapi(
        xfs_extlen_t            total,          /* total blocks needed */
        struct xfs_bmbt_irec    *mval,          /* output: map values */
        int                     *nmap,          /* i/o: mval size/count */
-       xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
-       xfs_extdelta_t          *delta);        /* o: change made to incore
-                                                  extents */
+       xfs_bmap_free_t         *flist);        /* i/o: list extents to free */
 
 /*
  * Map file blocks to filesystem blocks, simple version.
@@ -346,8 +325,6 @@ xfs_bunmapi(
        xfs_fsblock_t           *firstblock,    /* first allocated block
                                                   controls a.g. for allocs */
        xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
-       xfs_extdelta_t          *delta,         /* o: change made to incore
-                                                  extents */
        int                     *done);         /* set if not done yet */
 
 /*
index 416e47e54b83fc79f2472f59de81d8a34769e940..87d3c10b695437da9043d9a72985b8d8d25280c6 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
 #include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
index 96be4b0f249613fef3b0cc8ab0ae443dafc3d2f4..829af92f0fbadd0f4e311b8151055cc9e135e969 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_btree.h"
 #include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 
index 02a80984aa05f13189f4c99f186f1f098fa02e3d..1b09d7a280dfa6f1e1abcf247f27989da5fd6fc4 100644 (file)
@@ -24,7 +24,6 @@
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 
 kmem_zone_t    *xfs_buf_item_zone;
 
+static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
+{
+       return container_of(lip, struct xfs_buf_log_item, bli_item);
+}
+
+
 #ifdef XFS_TRANS_DEBUG
 /*
  * This function uses an alternate strategy for tracking the bytes
@@ -151,12 +156,13 @@ STATIC void       xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
  */
 STATIC uint
 xfs_buf_item_size(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       uint            nvecs;
-       int             next_bit;
-       int             last_bit;
-       xfs_buf_t       *bp;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
+       uint                    nvecs;
+       int                     next_bit;
+       int                     last_bit;
 
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
@@ -170,7 +176,6 @@ xfs_buf_item_size(
                return 1;
        }
 
-       bp = bip->bli_buf;
        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
        nvecs = 1;
        last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
@@ -219,13 +224,13 @@ xfs_buf_item_size(
  */
 STATIC void
 xfs_buf_item_format(
-       xfs_buf_log_item_t      *bip,
-       xfs_log_iovec_t         *log_vector)
+       struct xfs_log_item     *lip,
+       struct xfs_log_iovec    *vecp)
 {
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf  *bp = bip->bli_buf;
        uint            base_size;
        uint            nvecs;
-       xfs_log_iovec_t *vecp;
-       xfs_buf_t       *bp;
        int             first_bit;
        int             last_bit;
        int             next_bit;
@@ -235,8 +240,6 @@ xfs_buf_item_format(
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
-       bp = bip->bli_buf;
-       vecp = log_vector;
 
        /*
         * The size of the base structure is the size of the
@@ -248,7 +251,7 @@ xfs_buf_item_format(
        base_size =
                (uint)(sizeof(xfs_buf_log_format_t) +
                       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
-       vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
+       vecp->i_addr = &bip->bli_format;
        vecp->i_len = base_size;
        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
        vecp++;
@@ -263,7 +266,7 @@ xfs_buf_item_format(
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
-                     xfs_log_item_in_current_chkpt(&bip->bli_item)))
+                     xfs_log_item_in_current_chkpt(lip)))
                        bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }
@@ -356,66 +359,90 @@ xfs_buf_item_format(
 
 /*
  * This is called to pin the buffer associated with the buf log item in memory
- * so it cannot be written out.  Simply call bpin() on the buffer to do this.
+ * so it cannot be written out.
  *
  * We also always take a reference to the buffer log item here so that the bli
  * is held while the item is pinned in memory. This means that we can
  * unconditionally drop the reference count a transaction holds when the
  * transaction is completed.
  */
-
 STATIC void
 xfs_buf_item_pin(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       xfs_buf_t       *bp;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
 
-       bp = bip->bli_buf;
-       ASSERT(XFS_BUF_ISBUSY(bp));
+       ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
-       atomic_inc(&bip->bli_refcount);
+
        trace_xfs_buf_item_pin(bip);
-       xfs_bpin(bp);
-}
 
+       atomic_inc(&bip->bli_refcount);
+       atomic_inc(&bip->bli_buf->b_pin_count);
+}
 
 /*
  * This is called to unpin the buffer associated with the buf log
  * item which was previously pinned with a call to xfs_buf_item_pin().
- * Just call bunpin() on the buffer to do this.
  *
  * Also drop the reference to the buf item for the current transaction.
  * If the XFS_BLI_STALE flag is set and we are the last reference,
  * then free up the buf log item and unlock the buffer.
+ *
+ * If the remove flag is set we are called from uncommit in the
+ * forced-shutdown path.  If that is true and the reference count on
+ * the log item is going to drop to zero we need to free the item's
+ * descriptor in the transaction.
  */
 STATIC void
 xfs_buf_item_unpin(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip,
+       int                     remove)
 {
-       struct xfs_ail  *ailp;
-       xfs_buf_t       *bp;
-       int             freed;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       xfs_buf_t       *bp = bip->bli_buf;
+       struct xfs_ail  *ailp = lip->li_ailp;
        int             stale = bip->bli_flags & XFS_BLI_STALE;
+       int             freed;
 
-       bp = bip->bli_buf;
-       ASSERT(bp != NULL);
        ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
        trace_xfs_buf_item_unpin(bip);
 
        freed = atomic_dec_and_test(&bip->bli_refcount);
-       ailp = bip->bli_item.li_ailp;
-       xfs_bunpin(bp);
+
+       if (atomic_dec_and_test(&bp->b_pin_count))
+               wake_up_all(&bp->b_waiters);
+
        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
                ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+
                trace_xfs_buf_item_unpin_stale(bip);
 
+               if (remove) {
+                       /*
+                        * We have to remove the log item from the transaction
+                        * as we are about to release our reference to the
+                        * buffer.  If we don't, the unlock that occurs later
+                        * in xfs_trans_uncommit() will try to reference the
+                        * buffer which we no longer have a hold on.
+                        */
+                       xfs_trans_del_item(lip);
+
+                       /*
+                        * Since the transaction no longer refers to the buffer,
+                        * the buffer should no longer refer to the transaction.
+                        */
+                       XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+               }
+
                /*
                 * If we get called here because of an IO error, we may
                 * or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -436,48 +463,6 @@ xfs_buf_item_unpin(
        }
 }
 
-/*
- * this is called from uncommit in the forced-shutdown path.
- * we need to check to see if the reference count on the log item
- * is going to drop to zero.  If so, unpin will free the log item
- * so we need to free the item's descriptor (that points to the item)
- * in the transaction.
- */
-STATIC void
-xfs_buf_item_unpin_remove(
-       xfs_buf_log_item_t      *bip,
-       xfs_trans_t             *tp)
-{
-       /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
-       if ((atomic_read(&bip->bli_refcount) == 1) &&
-           (bip->bli_flags & XFS_BLI_STALE)) {
-               /*
-                * yes -- We can safely do some work here and then call
-                * buf_item_unpin to do the rest because we are
-                * are holding the buffer locked so no one else will be
-                * able to bump up the refcount. We have to remove the
-                * log item from the transaction as we are about to release
-                * our reference to the buffer. If we don't, the unlock that
-                * occurs later in the xfs_trans_uncommit() will try to
-                * reference the buffer which we no longer have a hold on.
-                */
-               struct xfs_log_item_desc *lidp;
-
-               ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
-               trace_xfs_buf_item_unpin_stale(bip);
-
-               lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
-               xfs_trans_free_item(tp, lidp);
-
-               /*
-                * Since the transaction no longer refers to the buffer, the
-                * buffer should no longer refer to the transaction.
-                */
-               XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
-       }
-       xfs_buf_item_unpin(bip);
-}
-
 /*
  * This is called to attempt to lock the buffer associated with this
  * buf log item.  Don't sleep on the buffer lock.  If we can't get
@@ -488,11 +473,11 @@ xfs_buf_item_unpin_remove(
  */
 STATIC uint
 xfs_buf_item_trylock(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       xfs_buf_t       *bp;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
 
-       bp = bip->bli_buf;
        if (XFS_BUF_ISPINNED(bp))
                return XFS_ITEM_PINNED;
        if (!XFS_BUF_CPSEMA(bp))
@@ -529,13 +514,12 @@ xfs_buf_item_trylock(
  */
 STATIC void
 xfs_buf_item_unlock(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       int             aborted;
-       xfs_buf_t       *bp;
-       uint            hold;
-
-       bp = bip->bli_buf;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
+       int                     aborted;
+       uint                    hold;
 
        /* Clear the buffer's association with this transaction. */
        XFS_BUF_SET_FSPRIVATE2(bp, NULL);
@@ -546,7 +530,7 @@ xfs_buf_item_unlock(
         * (cancelled) buffers at unpin time, but we'll never go through the
         * pin/unpin cycle if we abort inside commit.
         */
-       aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
+       aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
 
        /*
         * Before possibly freeing the buf item, determine if we should
@@ -607,16 +591,16 @@ xfs_buf_item_unlock(
  */
 STATIC xfs_lsn_t
 xfs_buf_item_committed(
-       xfs_buf_log_item_t      *bip,
+       struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
 {
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+
        trace_xfs_buf_item_committed(bip);
 
-       if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
-           (bip->bli_item.li_lsn != 0)) {
-               return bip->bli_item.li_lsn;
-       }
-       return (lsn);
+       if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
+               return lip->li_lsn;
+       return lsn;
 }
 
 /*
@@ -626,15 +610,16 @@ xfs_buf_item_committed(
  */
 STATIC void
 xfs_buf_item_push(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       xfs_buf_t       *bp;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
 
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+       ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
+
        trace_xfs_buf_item_push(bip);
 
-       bp = bip->bli_buf;
-       ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
        xfs_buf_relse(bp);
 }
 
@@ -646,22 +631,24 @@ xfs_buf_item_push(
  */
 STATIC void
 xfs_buf_item_pushbuf(
-       xfs_buf_log_item_t      *bip)
+       struct xfs_log_item     *lip)
 {
-       xfs_buf_t       *bp;
+       struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+       struct xfs_buf          *bp = bip->bli_buf;
 
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+       ASSERT(XFS_BUF_ISDELAYWRITE(bp));
+
        trace_xfs_buf_item_pushbuf(bip);
 
-       bp = bip->bli_buf;
-       ASSERT(XFS_BUF_ISDELAYWRITE(bp));
        xfs_buf_delwri_promote(bp);
        xfs_buf_relse(bp);
 }
 
-/* ARGSUSED */
 STATIC void
-xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
+xfs_buf_item_committing(
+       struct xfs_log_item     *lip,
+       xfs_lsn_t               commit_lsn)
 {
 }
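
Background, not part of the hunk above: the pin/unpin changes earlier in this file replace the old xfs_bpin()/xfs_bunpin() calls with direct manipulation of b_pin_count, incrementing it on pin and waking b_waiters when an unpin drops it to zero. A toy, single-threaded sketch of that counting scheme (plain C11 atomics, with a printf standing in for the wait-queue wakeup):

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for a buffer with a pin count (cf. b_pin_count above). */
struct buf {
        atomic_int pin_count;
};

static void buf_pin(struct buf *bp)
{
        atomic_fetch_add(&bp->pin_count, 1);
}

static void buf_unpin(struct buf *bp)
{
        /* atomic_fetch_sub returns the old value; 1 means we just hit zero,
         * which is where the real code does wake_up_all(&bp->b_waiters). */
        if (atomic_fetch_sub(&bp->pin_count, 1) == 1)
                printf("pin count hit zero: wake any waiters\n");
}

int main(void)
{
        struct buf bp;

        atomic_init(&bp.pin_count, 0);
        buf_pin(&bp);
        buf_pin(&bp);
        buf_unpin(&bp);
        buf_unpin(&bp);         /* only this one reports the wakeup */
        return 0;
}
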
 
@@ -669,21 +656,16 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
  * This is the ops vector shared by all buf log items.
  */
 static struct xfs_item_ops xfs_buf_item_ops = {
-       .iop_size       = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
-       .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
-                                       xfs_buf_item_format,
-       .iop_pin        = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
-       .iop_unpin      = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
-       .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
-                                       xfs_buf_item_unpin_remove,
-       .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
-       .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
-       .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_buf_item_committed,
-       .iop_push       = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
-       .iop_pushbuf    = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
-       .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
-                                       xfs_buf_item_committing
+       .iop_size       = xfs_buf_item_size,
+       .iop_format     = xfs_buf_item_format,
+       .iop_pin        = xfs_buf_item_pin,
+       .iop_unpin      = xfs_buf_item_unpin,
+       .iop_trylock    = xfs_buf_item_trylock,
+       .iop_unlock     = xfs_buf_item_unlock,
+       .iop_committed  = xfs_buf_item_committed,
+       .iop_push       = xfs_buf_item_push,
+       .iop_pushbuf    = xfs_buf_item_pushbuf,
+       .iop_committing = xfs_buf_item_committing
 };
 
 
@@ -712,7 +694,6 @@ xfs_buf_item_init(
         */
        if (bp->b_mount != mp)
                bp->b_mount = mp;
-       XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
        if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
                lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
                if (lip->li_type == XFS_LI_BUF) {
@@ -1098,15 +1079,14 @@ xfs_buf_error_relse(
  * It is called by xfs_buf_iodone_callbacks() above which will take
  * care of cleaning up the buffer itself.
  */
-/* ARGSUSED */
 void
 xfs_buf_iodone(
-       xfs_buf_t               *bp,
-       xfs_buf_log_item_t      *bip)
+       struct xfs_buf          *bp,
+       struct xfs_log_item     *lip)
 {
-       struct xfs_ail          *ailp = bip->bli_item.li_ailp;
+       struct xfs_ail          *ailp = lip->li_ailp;
 
-       ASSERT(bip->bli_buf == bp);
+       ASSERT(BUF_ITEM(lip)->bli_buf == bp);
 
        xfs_buf_rele(bp);
 
@@ -1120,6 +1100,6 @@ xfs_buf_iodone(
         * Either way, AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->xa_lock);
-       xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
-       xfs_buf_item_free(bip);
+       xfs_trans_ail_delete(ailp, lip);
+       xfs_buf_item_free(BUF_ITEM(lip));
 }
index f20bb472d582f1e5b711c1593958b3593ad2167c..0e2ed43f16c71ace4bde6152ef2eca33b2fa966d 100644 (file)
@@ -124,7 +124,7 @@ void        xfs_buf_attach_iodone(struct xfs_buf *,
                              void(*)(struct xfs_buf *, xfs_log_item_t *),
                              xfs_log_item_t *);
 void   xfs_buf_iodone_callbacks(struct xfs_buf *);
-void   xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *);
+void   xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
 
 #ifdef XFS_TRANS_DEBUG
 void
index 0ca556b4bf313bf8526769ff051f188df5a3066e..30fa0e206fba84a5279ed9f92f6c9b7035484f47 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
@@ -581,16 +576,14 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
        xfs_da_intnode_t *node;
        xfs_da_node_entry_t *btree;
        int tmp;
-       xfs_mount_t *mp;
 
        node = oldblk->bp->data;
-       mp = state->mp;
        ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
        ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
        ASSERT(newblk->blkno != 0);
        if (state->args->whichfork == XFS_DATA_FORK)
-               ASSERT(newblk->blkno >= mp->m_dirleafblk &&
-                      newblk->blkno < mp->m_dirfreeblk);
+               ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
+                      newblk->blkno < state->mp->m_dirfreeblk);
 
        /*
         * We may need to make some room before we insert the new node.
@@ -1601,7 +1594,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
                        xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
                        XFS_BMAPI_CONTIG,
                        args->firstblock, args->total, &map, &nmap,
-                       args->flist, NULL))) {
+                       args->flist))) {
                return error;
        }
        ASSERT(nmap <= 1);
@@ -1622,8 +1615,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
                                        xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
                                        XFS_BMAPI_METADATA,
                                        args->firstblock, args->total,
-                                       &mapp[mapi], &nmap, args->flist,
-                                       NULL))) {
+                                       &mapp[mapi], &nmap, args->flist))) {
                                kmem_free(mapp);
                                return error;
                        }
@@ -1884,7 +1876,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
                 */
                if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
                                xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
-                               0, args->firstblock, args->flist, NULL,
+                               0, args->firstblock, args->flist,
                                &done)) == ENOSPC) {
                        if (w != XFS_DATA_FORK)
                                break;
@@ -1989,7 +1981,7 @@ xfs_da_do_buf(
                                        nfsb,
                                        XFS_BMAPI_METADATA |
                                                xfs_bmapi_aflag(whichfork),
-                                       NULL, 0, mapp, &nmap, NULL, NULL)))
+                                       NULL, 0, mapp, &nmap, NULL)))
                                goto exit0;
                }
        } else {
index 7f159d2a429a97e8a71e0fd57298ec499519c064..3b9582c60a225117f7b6d4180e4287b0118375ed 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_itable.h"
 #include "xfs_dfrag.h"
 #include "xfs_error.h"
-#include "xfs_rw.h"
 #include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 
@@ -425,11 +416,8 @@ xfs_swap_extents(
        }
 
 
-       IHOLD(ip);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
-       IHOLD(tip);
-       xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       xfs_trans_ijoin_ref(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 
        xfs_trans_log_inode(tp, ip,  ilf_fields);
        xfs_trans_log_inode(tp, tip, tilf_fields);
index 42520f041265fea2d7df1d02bc8a4e3e82215337..a1321bc7f19210a3b4bd1d87ec7c9d5eae410b28 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
@@ -382,7 +380,7 @@ xfs_readdir(
        int             rval;           /* return value */
        int             v;              /* type-checking value */
 
-       xfs_itrace_entry(dp);
+       trace_xfs_readdir(dp);
 
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return XFS_ERROR(EIO);
@@ -549,7 +547,7 @@ xfs_dir2_grow_inode(
        if ((error = xfs_bmapi(tp, dp, bno, count,
                        XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
                        args->firstblock, args->total, &map, &nmap,
-                       args->flist, NULL)))
+                       args->flist)))
                return error;
        ASSERT(nmap <= 1);
        if (nmap == 1) {
@@ -581,8 +579,7 @@ xfs_dir2_grow_inode(
                        if ((error = xfs_bmapi(tp, dp, b, c,
                                        XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
                                        args->firstblock, args->total,
-                                       &mapp[mapi], &nmap, args->flist,
-                                       NULL))) {
+                                       &mapp[mapi], &nmap, args->flist))) {
                                kmem_free(mapp);
                                return error;
                        }
@@ -715,7 +712,7 @@ xfs_dir2_shrink_inode(
         */
        if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
                        XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
-                       NULL, &done))) {
+                       &done))) {
                /*
                 * ENOSPC actually can happen if we're in a removename with
                 * no space reservation, and the resulting block removal
index 779a267b0a84c55a83764d25d469493b24ca0a7f..580d99cef9e7bb25a4344343cf080df23b3e51a3 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
@@ -1073,10 +1071,10 @@ xfs_dir2_sf_to_block(
         */
 
        buf_len = dp->i_df.if_bytes;
-       buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
+       buf = kmem_alloc(buf_len, KM_SLEEP);
 
-       memcpy(buf, sfp, dp->i_df.if_bytes);
-       xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
+       memcpy(buf, sfp, buf_len);
+       xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
        dp->i_d.di_size = 0;
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
        /*
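
Minor note on the xfs_dir2_sf_to_block() hunk above: the shortform size is read once into buf_len and then used for the allocation, the memcpy() and the xfs_idata_realloc() call, rather than re-reading dp->i_df.if_bytes each time. A trivial stand-alone sketch of that save-then-reuse pattern with made-up names (fork, fork_shrink):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A hypothetical source whose length field changes when it is shrunk. */
struct fork { char *data; size_t bytes; };

static void fork_shrink(struct fork *f, size_t by)
{
        f->bytes -= by;         /* after this, f->bytes no longer holds the old length */
}

int main(void)
{
        struct fork f = { "hello world", 11 };
        size_t buf_len = f.bytes;               /* read the length once */
        char *buf = malloc(buf_len);

        memcpy(buf, f.data, buf_len);           /* use the saved value ... */
        fork_shrink(&f, buf_len);               /* ... even after the source changes */

        printf("copied %zu bytes, fork now %zu bytes\n", buf_len, f.bytes);
        free(buf);
        return 0;
}
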
index 498f8d694330dba475aad5c1c1d1147f73bfab9c..921595b84f5bc613835c1e05601875b46b16c871 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_dir2_data.h"
index e2d89854ec9ec4649f2016d3c2592832bb5ee403..504be8640e918250d5ea58a6df3d64c223655187 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
@@ -875,7 +873,7 @@ xfs_dir2_leaf_getdents(
                                        xfs_dir2_byte_to_da(mp,
                                                XFS_DIR2_LEAF_OFFSET) - map_off,
                                        XFS_BMAPI_METADATA, NULL, 0,
-                                       &map[map_valid], &nmap, NULL, NULL);
+                                       &map[map_valid], &nmap, NULL);
                                /*
                                 * Don't know if we should ignore this or
                                 * try to return an error.
index 78fc4d9ae7562d02ea2068132ac44a81e9dfa776..f9a0864b696afea29fec8567a906ac793c882455 100644 (file)
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h