sched/fair: replace cfs_rq->rb_leftmost
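
CFS caches the leftmost node of the runqueue rbtree so that picking the
next entity is O(1). Rather than maintaining that cache by hand in
cfs_rq->rb_leftmost, convert tasks_timeline to the rb_root_cached API:
insertions pass a "leftmost" hint to rb_insert_color_cached(), removals
go through rb_erase_cached(), and readers use rb_first_cached(). This
removes the open-coded cache management from __enqueue_entity() and
__dequeue_entity() with no intended change in behaviour.
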
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8bc0a883d19045145ad7b839c23ca2ec1dc96bfe..a5d83ed8dd824c180eede5643176d9274c672da7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -513,6 +513,7 @@ static inline int entity_before(struct sched_entity *a,
 static void update_min_vruntime(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq->curr;
+       struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
 
        u64 vruntime = cfs_rq->min_vruntime;
 
@@ -523,10 +524,9 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                        curr = NULL;
        }
 
-       if (cfs_rq->rb_leftmost) {
-               struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
-                                                  struct sched_entity,
-                                                  run_node);
+       if (leftmost) { /* non-empty tree */
+               struct sched_entity *se;
+               se = rb_entry(leftmost, struct sched_entity, run_node);
 
                if (!curr)
                        vruntime = se->vruntime;
@@ -547,10 +547,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+       struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
-       int leftmost = 1;
+       bool leftmost = true;
 
        /*
         * Find the right place in the rbtree:
@@ -566,36 +566,23 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
-                       leftmost = 0;
+                       leftmost = false;
                }
        }
 
-       /*
-        * Maintain a cache of leftmost tree entries (it is frequently
-        * used):
-        */
-       if (leftmost)
-               cfs_rq->rb_leftmost = &se->run_node;
-
        rb_link_node(&se->run_node, parent, link);
-       rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+       rb_insert_color_cached(&se->run_node,
+                              &cfs_rq->tasks_timeline, leftmost);
 }
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       if (cfs_rq->rb_leftmost == &se->run_node) {
-               struct rb_node *next_node;
-
-               next_node = rb_next(&se->run_node);
-               cfs_rq->rb_leftmost = next_node;
-       }
-
-       rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+       rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 {
-       struct rb_node *left = cfs_rq->rb_leftmost;
+       struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
 
        if (!left)
                return NULL;
@@ -616,7 +603,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
 #ifdef CONFIG_SCHED_DEBUG
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-       struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
+       struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
 
        if (!last)
                return NULL;
@@ -9312,7 +9299,7 @@ static void set_curr_task_fair(struct rq *rq)
 
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
-       cfs_rq->tasks_timeline = RB_ROOT;
+       cfs_rq->tasks_timeline = RB_ROOT_CACHED;
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
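
Below is a minimal, illustrative sketch (not part of the patch) of the
rb_root_cached pattern the diff switches to. The struct and function names
(item, item_tree, item_insert(), ...) are made up for the example; the
helpers themselves (RB_ROOT_CACHED, rb_link_node(), rb_insert_color_cached(),
rb_erase_cached(), rb_first_cached()) are the <linux/rbtree.h> interfaces
used above.

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
        struct rb_node  node;
        u64             key;
};

static struct rb_root_cached item_tree = RB_ROOT_CACHED;

static void item_insert(struct item *new)
{
        struct rb_node **link = &item_tree.rb_root.rb_node;
        struct rb_node *parent = NULL;
        bool leftmost = true;

        /* Standard rbtree descent; note whether we ever went right. */
        while (*link) {
                struct item *entry = rb_entry(*link, struct item, node);

                parent = *link;
                if (new->key < entry->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&new->node, parent, link);
        /* The cached leftmost pointer is updated here, not by the caller. */
        rb_insert_color_cached(&new->node, &item_tree, leftmost);
}

static void item_remove(struct item *it)
{
        /* rb_erase_cached() also fixes up the cached leftmost if needed. */
        rb_erase_cached(&it->node, &item_tree);
}

static struct item *item_first(void)
{
        struct rb_node *left = rb_first_cached(&item_tree);

        return left ? rb_entry(left, struct item, node) : NULL;
}

The point of the cached variant is that the leftmost pointer lives inside
struct rb_root_cached itself, so every user of the tree gets the O(1)
"first element" lookup without duplicating the bookkeeping that
__enqueue_entity()/__dequeue_entity() used to carry.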