Merge branch 'akpm' (patches from Andrew)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8213ff6e365d1699e4dae34ce1adfb385d82fec9..ea74d43924b25f7ae98788532d2070152b6cb5fc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1173,7 +1173,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
        /* New address space, reset the preferred nid */
        if (!(clone_flags & CLONE_VM)) {
-               p->numa_preferred_nid = -1;
+               p->numa_preferred_nid = NUMA_NO_NODE;
                return;
        }
 
@@ -1193,13 +1193,13 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 {
-       rq->nr_numa_running += (p->numa_preferred_nid != -1);
+       rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
        rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
 }
 
 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
-       rq->nr_numa_running -= (p->numa_preferred_nid != -1);
+       rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
        rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
 }
 
@@ -1413,7 +1413,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
         * two full passes of the "multi-stage node selection" test that is
         * executed below.
         */
-       if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+       if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
            (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
                return true;
 
@@ -1861,7 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p)
        unsigned long interval = HZ;
 
        /* This task has no NUMA fault statistics yet */
-       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+       if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
                return;
 
        /* Periodically retry migrating the task to the preferred node */
@@ -2108,7 +2108,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
 
 static void task_numa_placement(struct task_struct *p)
 {
-       int seq, nid, max_nid = -1;
+       int seq, nid, max_nid = NUMA_NO_NODE;
        unsigned long max_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
        unsigned long total_faults;
@@ -2651,7 +2651,8 @@ static void update_scan_period(struct task_struct *p, int new_cpu)
                 * the preferred node.
                 */
                if (dst_nid == p->numa_preferred_nid ||
-                   (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+                   (p->numa_preferred_nid != NUMA_NO_NODE &&
+                       src_nid != p->numa_preferred_nid))
                        return;
        }
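
Every hunk above makes the same substitution: the open-coded -1 sentinel for "this task has no preferred NUMA node" is replaced by the named constant NUMA_NO_NODE. A minimal sketch of why this is behaviour-preserving, assuming NUMA_NO_NODE expands to (-1) as in include/linux/numa.h; the struct task_sketch type and has_preferred_node() helper below are illustrative stand-ins, not kernel code:

/*
 * Sketch only: shows the old bare -1 check next to the new named-constant
 * check. Both compare against the same value, so generated code is identical;
 * the gain is readability and grep-ability.
 */
#include <stdio.h>

#define NUMA_NO_NODE	(-1)	/* same value the kernel definition uses */

struct task_sketch {
	int numa_preferred_nid;	/* stand-in for task_struct::numa_preferred_nid */
};

static int has_preferred_node(const struct task_sketch *p)
{
	/* Old style: bare magic number, reader must know -1 means "none". */
	/* return p->numa_preferred_nid != -1; */

	/* New style after this diff: same comparison, intent spelled out. */
	return p->numa_preferred_nid != NUMA_NO_NODE;
}

int main(void)
{
	struct task_sketch t = { .numa_preferred_nid = NUMA_NO_NODE };

	printf("has preferred node: %d\n", has_preferred_node(&t));
	return 0;
}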