[XFS] Be more defensive with page flags (error/private) for metadata buffers.
author Nathan Scott <nathans@sgi.com>
Thu, 28 Sep 2006 01:03:13 +0000 (11:03 +1000)
committer Tim Shimmin <tes@sgi.com>
Thu, 28 Sep 2006 01:03:13 +0000 (11:03 +1000)

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26801a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
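The diff below adds ASSERT(!PagePrivate(page)) checks at the points where the buffer cache frees, looks up, and completes I/O on metadata pages, and drops the SetPageError() call on read failure for these buffers. As a minimal sketch of the release-side pattern only (the helper name xb_release_page is hypothetical and not part of the patch), the defensive idea is:

	/*
	 * Illustrative sketch, not part of the patch: assert that a
	 * metadata page carries no filesystem-private state before the
	 * buffer layer drops its page cache reference.
	 */
	static inline void
	xb_release_page(struct page *page)
	{
		ASSERT(!PagePrivate(page));	/* metadata pages must not have ->private set */
		page_cache_release(page);	/* drop the reference taken at lookup time */
	}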
fs/xfs/linux-2.6/xfs_buf.c

index 58b6599de6197726412ac8c82426d65eaa280bfc..9bbadafdcb00285a8644a350a0ef033c7889f139 100644
@@ -318,8 +318,12 @@ xfs_buf_free(
                if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
                        free_address(bp->b_addr - bp->b_offset);
 
-               for (i = 0; i < bp->b_page_count; i++)
-                       page_cache_release(bp->b_pages[i]);
+               for (i = 0; i < bp->b_page_count; i++) {
+                       struct page     *page = bp->b_pages[i];
+
+                       ASSERT(!PagePrivate(page));
+                       page_cache_release(page);
+               }
                _xfs_buf_free_pages(bp);
        } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
                 /*
@@ -400,6 +404,7 @@ _xfs_buf_lookup_pages(
                nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
                size -= nbytes;
 
+               ASSERT(!PagePrivate(page));
                if (!PageUptodate(page)) {
                        page_count--;
                        if (blocksize >= PAGE_CACHE_SIZE) {
@@ -1117,10 +1122,10 @@ xfs_buf_bio_end_io(
        do {
                struct page     *page = bvec->bv_page;
 
+               ASSERT(!PagePrivate(page));
                if (unlikely(bp->b_error)) {
                        if (bp->b_flags & XBF_READ)
                                ClearPageUptodate(page);
-                       SetPageError(page);
                } else if (blocksize >= PAGE_CACHE_SIZE) {
                        SetPageUptodate(page);
                } else if (!PagePrivate(page) &&