mm: dirty balancing for tasks
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 17 Oct 2007 06:25:50 +0000 (23:25 -0700)
committerLinus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 17 Oct 2007 15:42:45 +0000 (08:42 -0700)
Based on ideas of Andrew:
  http://marc.info/?l=linux-kernel&m=102912915020543&w=2

Scale the bdi dirty limit inversely with the task's dirty rate.
This gives heavy writers a lower dirty limit than the occasional writer.
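
To illustrate the effect, here is a standalone userspace sketch (toy numbers,
not part of the patch; the real kernel code is task_dirty_limit() in
mm/page-writeback.c below) of how a task's share of recent dirtyings scales
its limit down, clamped to half the original value:

/* sketch.c -- standalone model of the per-task dirty limit scaling */
#include <stdio.h>

/*
 * A task owning fraction num/denom of recent dirtyings has its limit
 * reduced by up to dirty/8, but never below half the original limit.
 */
static long task_dirty_limit_sketch(long dirty, long num, long denom)
{
	long inv = (dirty >> 3) * num / denom;
	long scaled = dirty - inv;

	return scaled < dirty / 2 ? dirty / 2 : scaled;
}

int main(void)
{
	/* heavy writer owns ~100% of recent dirtyings, light writer ~5% */
	printf("heavy: %ld\n", task_dirty_limit_sketch(1000, 100, 100)); /* 875 */
	printf("light: %ld\n", task_dirty_limit_sketch(1000,   5, 100)); /* 994 */
	return 0;
}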

Andrea proposed something similar:
  http://lwn.net/Articles/152277/

The main disadvantage of his patch is that he uses an unrelated quantity to
measure time, which leaves him with a workload-dependent tunable. Other than
that, the two approaches appear quite similar.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/init_task.h
include/linux/sched.h
kernel/fork.c
mm/page-writeback.c

index 513bc3e489f018f33495867568d708a9335b0c06..3a619f57a2b2afa1f37acf12000a2e82f9a6faf4 100644 (file)
@@ -171,6 +171,7 @@ extern struct group_info init_groups;
                [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),           \
                [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),            \
        },                                                              \
+       .dirties = INIT_PROP_LOCAL_SINGLE(dirties),                     \
        INIT_TRACE_IRQFLAGS                                             \
        INIT_LOCKDEP                                                    \
 }
index 592e3a55f818fe328222a6051d6a51c5c15ecfae..59738efff8ad92c9aa7b5fa3bef6848f5309d882 100644 (file)
@@ -74,6 +74,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -1149,6 +1150,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
 #endif
+       struct prop_local_single dirties;
 };
 
 /*
index 3fc3c1383912a8cf105ddbb218b00dfa88d76233..163325af8179adc71c9fbce52a4ffcb6fe9bac56 100644 (file)
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
+       prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
        struct task_struct *tsk;
        struct thread_info *ti;
+       int err;
 
        prepare_to_copy(orig);
 
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
        *tsk = *orig;
        tsk->stack = ti;
+
+       err = prop_local_init_single(&tsk->dirties);
+       if (err) {
+               free_thread_info(ti);
+               free_task_struct(tsk);
+               return NULL;
+       }
+
        setup_thread_stack(tsk, orig);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
index b0360546ac8697c1beaff49c28271e4278852070..4073d531cd7b6ad8486330f02edcea9a16e9d5e7 100644 (file)
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
 
 static unsigned long determine_dirtyable_memory(void);
 
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                int shift = calc_period_shift();
                prop_change_shift(&vm_completions, shift);
+               prop_change_shift(&vm_dirties, shift);
        }
        return ret;
 }
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
        __prop_inc_percpu(&vm_completions, &bdi->completions);
 }
 
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+       prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
        *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
+static inline void task_dirties_fraction(struct task_struct *tsk,
+               long *numerator, long *denominator)
+{
+       prop_fraction_single(&vm_dirties, &tsk->dirties,
+                               numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+       long numerator, denominator;
+       long dirty = *pdirty;
+       u64 inv = dirty >> 3;
+
+       task_dirties_fraction(tsk, &numerator, &denominator);
+       inv *= numerator;
+       do_div(inv, denominator);
+
+       dirty -= inv;
+       if (dirty < *pdirty/2)
+               dirty = *pdirty/2;
+
+       *pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 
                *pbdi_dirty = bdi_dirty;
                clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+               task_dirty_limit(current, pbdi_dirty);
        }
 }
 
@@ -720,6 +759,7 @@ void __init page_writeback_init(void)
 
        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
+       prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
        }
        return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+       int ret = __set_page_dirty(page);
+       if (ret)
+               task_dirty_inc(current);
+       return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*