unsigned int for_background:1;
unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned int auto_free:1; /* free on completion */
+ unsigned int start_all:1; /* nr_pages == 0 (all) writeback */
enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
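The new start_all bit joins the existing single-bit flags, so it adds no size
to the structure. A minimal user-space sketch (a stand-in struct, not the
kernel's) showing that these :1 bitfields all pack into one unsigned int:

#include <stdio.h>

/* Stand-in for the flag fields above; not the kernel struct. All four
 * single-bit fields share one unsigned int, so adding start_all does
 * not grow the structure. */
struct work_flags {
	unsigned int for_background:1;
	unsigned int for_sync:1;
	unsigned int auto_free:1;
	unsigned int start_all:1;
};

int main(void)
{
	struct work_flags f = { .auto_free = 1, .start_all = 1 };

	printf("sizeof(struct work_flags) = %zu\n", sizeof f);
	printf("start_all = %u\n", f.start_all);
	return 0;
}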
if (!wb_has_dirty_io(wb))
return;
+ /*
+ * All callers of this function want to start writeback of all
+ * dirty pages. Places like vmscan can call this at a very
+ * high frequency, causing pointless allocations of tons of
+ * work items and keeping the flusher threads busy retrieving
+ * that work. Ensure that we only allow one of them pending and
+ * inflight at a time. It doesn't matter if we race a little
+ * bit on this, so use the faster separate test/set bit variants.
+ */
+ if (test_bit(WB_start_all, &wb->state))
+ return;
+
+ set_bit(WB_start_all, &wb->state);
+
/*
* This is WB_SYNC_NONE writeback, so if allocation fails just
* wakeup the thread for old dirty data writeback
*/
work = kzalloc(sizeof(*work),
GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (!work) {
+ clear_bit(WB_start_all, &wb->state);
trace_writeback_nowork(wb);
wb_wakeup(wb);
return;
}
work->range_cyclic = 1;
work->reason = reason;
work->auto_free = 1;
+ work->start_all = 1;
wb_queue_work(wb, work);
}
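The separate test_bit()/set_bit() pair trades strict atomicity for speed:
test_bit() is a plain load, so the common already-set case never pays for an
atomic read-modify-write, and the worst a lost race costs is one redundant
work item. A rough user-space analog with C11 atomics (try_mark_start_all is
a hypothetical helper name, not a kernel API):

#include <stdatomic.h>
#include <stdbool.h>

#define WB_START_ALL	0	/* bit number, as in enum wb_state */

static atomic_ulong wb_state;

/* The relaxed load mirrors test_bit(): if the bit is already set we
 * back off without any atomic read-modify-write. The window between
 * the load and the fetch_or mirrors the test_bit()/set_bit() race in
 * the kernel code; losing it only queues one redundant work item. */
static bool try_mark_start_all(void)
{
	unsigned long bit = 1UL << WB_START_ALL;

	if (atomic_load_explicit(&wb_state, memory_order_relaxed) & bit)
		return false;	/* a start-all is already pending */

	atomic_fetch_or_explicit(&wb_state, bit, memory_order_relaxed);
	return true;		/* caller should allocate and queue work */
}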
list_del_init(&work->list);
}
spin_unlock_bh(&wb->work_lock);
+
+ /*
+ * Once we start processing a work item that had nr_pages == 0,
+ * clear the wb state bit for it so more can be queued.
+ */
+ if (work && work->start_all)
+ clear_bit(WB_start_all, &wb->state);
+
return work;
}
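Putting the two halves together: the bit gates queueing on the submit side
and is dropped as soon as a start-all item is dequeued, so at most one such
item is ever pending on top of the one in flight. A single-threaded sketch of
that lifecycle, using stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define WB_START_ALL	0x1UL

/* Stand-ins for wb->state and a one-slot wb->work_list. */
static unsigned long state;
static struct { bool start_all; bool queued; } slot;

static void start_writeback(void)
{
	if (state & WB_START_ALL)	/* test_bit() analog */
		return;
	state |= WB_START_ALL;		/* set_bit() analog */
	slot.start_all = true;
	slot.queued = true;		/* wb_queue_work() analog */
}

static void get_next_work_item(void)
{
	if (!slot.queued)
		return;
	slot.queued = false;		/* list_del_init() analog */
	if (slot.start_all)
		state &= ~WB_START_ALL;	/* allow the next start-all */
}

int main(void)
{
	start_writeback();
	start_writeback();	/* suppressed: WB_START_ALL already set */
	printf("pending after two submits: %d\n", slot.queued);    /* 1 */
	get_next_work_item();	/* dequeue clears the gate */
	start_writeback();	/* allowed again */
	printf("pending after dequeue+submit: %d\n", slot.queued); /* 1 */
	return 0;
}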
WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+ WB_start_all, /* nr_pages == 0 (all) work pending */
};
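Note that WB_start_all is a bit number, not a mask: enum wb_state values
index into wb->state via set_bit()/test_bit()/clear_bit(). A self-contained
sketch of that convention (the helpers below are simplified, non-atomic
stand-ins for the kernel bitops, and the enum lists only the values shown
above; the real enum may contain additional entries):

#include <stdbool.h>
#include <stdio.h>

enum wb_state {
	WB_shutting_down,
	WB_writeback_running,
	WB_has_dirty_io,
	WB_start_all,
};

/* Simplified stand-ins for the kernel's bitops, which likewise take
 * a bit index rather than a mask. */
static void set_bit(int nr, unsigned long *addr)   { *addr |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

int main(void)
{
	unsigned long state = 0;

	set_bit(WB_start_all, &state);
	printf("start_all pending: %d\n", test_bit(WB_start_all, &state));
	clear_bit(WB_start_all, &state);
	printf("after clear: %d\n", test_bit(WB_start_all, &state));
	return 0;
}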
enum wb_congested_state {