mm: have zonelist contains structs with both a zone pointer and zone_idx
diff --git a/fs/buffer.c b/fs/buffer.c
index 7ba58386beee459779da2056d662572496ff2096..ac84cd13075d60142002897661ba067d60b65239 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,16 +360,18 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-       struct zone **zones;
-       pg_data_t *pgdat;
+       struct zoneref *zrefs;
+       int nid;
 
        wakeup_pdflush(1024);
        yield();
 
-       for_each_online_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-               if (*zones)
-                       try_to_free_pages(zones, 0, GFP_NOFS);
+       for_each_online_node(nid) {
+               zrefs = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+                                               gfp_zone(GFP_NOFS));
+               if (zrefs->zone)
+                       try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+                                               GFP_NOFS);
        }
 }
 
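For context, the zoneref introduced by this series pairs each zone pointer with a cached zone index, so iterators such as first_zones_zonelist() can filter on zone type without dereferencing struct zone. A minimal, self-contained sketch of the idea follows; the authoritative definitions live in include/linux/mmzone.h, and the array sizing plus the omission of the NUMA zonelist cache are simplifying assumptions:

	struct zone;				/* opaque for this sketch */

	struct zoneref {
		struct zone *zone;		/* pointer to the actual zone */
		int zone_idx;			/* cached zone_idx(zone) */
	};

	/* Assumed shape: a zonelist is a NULL-zone-terminated array of
	 * zonerefs, which is why the hunk above can simply test
	 * zrefs->zone to see whether any usable zone exists for
	 * GFP_NOFS on this node. */
	#define SKETCH_MAX_ZONES_PER_ZONELIST 4	/* assumed bound */
	struct zonelist {
		struct zoneref _zonerefs[SKETCH_MAX_ZONES_PER_ZONELIST + 1];
	};
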
@@ -1181,7 +1183,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
-       if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+       /*
+        * Very *carefully* optimize the it-is-already-dirty case.
+        *
+        * Don't let the final "is it dirty" escape to before we
+        * perhaps modified the buffer.
+        */
+       if (buffer_dirty(bh)) {
+               smp_mb();
+               if (buffer_dirty(bh))
+                       return;
+       }
+
+       if (!test_set_buffer_dirty(bh))
                __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
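The fast path above relies on the smp_mb() to keep the second buffer_dirty() test from being reordered before whatever earlier code modified the buffer, while still skipping the locked test-and-set in the common already-dirty case. Below is a userspace analogue of the same check / barrier / re-check pattern using C11 atomics; the names and the use of atomic_exchange() are illustrative stand-ins, not kernel API:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool dirty;	/* stands in for the BH_Dirty bit */

	static void mark_dirty_sketch(void)
	{
		/* Fast path: flag already set; re-check after a full fence so
		 * the second load cannot move above earlier stores to the data. */
		if (atomic_load_explicit(&dirty, memory_order_relaxed)) {
			atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
			if (atomic_load_explicit(&dirty, memory_order_relaxed))
				return;
		}

		/* Slow path: the read-modify-write plays the role of
		 * test_set_buffer_dirty(); only the 0 -> 1 transition would go
		 * on to dirty the owning page. */
		if (!atomic_exchange(&dirty, true))
			;	/* __set_page_dirty() equivalent would go here */
	}
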
@@ -2564,14 +2579,13 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
        struct inode *inode = page->mapping->host;
        struct buffer_head *head = fsdata;
        struct buffer_head *bh;
+       BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-       if (!PageMappedToDisk(page)) {
-               if (unlikely(copied < len) && !page_has_buffers(page))
-                       attach_nobh_buffers(page, head);
-               if (page_has_buffers(page))
-                       return generic_write_end(file, mapping, pos, len,
-                                               copied, page, fsdata);
-       }
+       if (unlikely(copied < len) && !page_has_buffers(page))
+               attach_nobh_buffers(page, head);
+       if (page_has_buffers(page))
+               return generic_write_end(file, mapping, pos, len,
+                                       copied, page, fsdata);
 
        SetPageUptodate(page);
        set_page_dirty(page);
@@ -3168,8 +3182,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-                               set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
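
With the per-allocation __GFP_RECLAIMABLE hint dropped, the reclaimable placement of buffer heads is expressed once, on the cache itself, rather than per call. Elsewhere in fs/buffer.c the cache is created along these lines (quoted from memory of this kernel version, so treat the exact flag set as an assumption):

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD),
			init_buffer_head);

SLAB_RECLAIM_ACCOUNT on the cache keeps every buffer_head slab in the reclaimable migrate type, which is what the removed set_migrateflags() call attempted one allocation at a time.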