Merge branch 'sched/urgent' into sched/core, to avoid conflicts
author     Ingo Molnar <mingo@kernel.org>
           Wed, 7 May 2014 11:15:46 +0000 (13:15 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 7 May 2014 11:15:46 +0000 (13:15 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
22 files changed:
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/pktcdvd.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/qla2xxx/qla_os.c
drivers/staging/android/binder.c
drivers/staging/lustre/lustre/llite/lloop.c
fs/ocfs2/cluster/heartbeat.c
include/linux/thread_info.h
kernel/locking/locktorture.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/workqueue.c
mm/huge_memory.c

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f70a230a2945225f89ae188909c7bc9db90bc32f..6cb1beb47c25d1d2a7db113ca9f173a9ef8b68d3 100644
@@ -548,7 +548,7 @@ static int loop_thread(void *data)
        struct loop_device *lo = data;
        struct bio *bio;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3a70ea2f7cd69b2641302e6c44560f32245a078c..56a027d6115e0f5fa83c48070ff26ca6d0ba061e 100644
@@ -533,7 +533,7 @@ static int nbd_thread(void *data)
        struct nbd_device *nbd = data;
        struct request *req;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a2af73db187b694c3112bf1c6d08e4070596cd10..ef166ad2dbadc37bdd58a4e055409898291eea82 100644
@@ -1463,7 +1463,7 @@ static int kcdrwd(void *foobar)
        struct packet_data *pkt;
        long min_sleep_time, residue;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
        set_freezable();
 
        for (;;) {
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1c4bb4f6ce932f95043385d1387ed14b789a9996..5d665680ae33fea5bbc74084d1d79d37f62c4f15 100644
@@ -1007,7 +1007,7 @@ static int ipmi_thread(void *data)
        struct timespec busy_until;
 
        ipmi_si_set_not_busy(&busy_until);
-       set_user_nice(current, 19);
+       set_user_nice(current, MAX_NICE);
        while (!kthread_should_stop()) {
                int busy_wait;
 
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ab3baa7f95082e0cabf03906354c69ae9ed8880b..8eec1653c9cc44ec5c338f61f49b74dedbedceef 100644
@@ -1803,7 +1803,7 @@ static int ap_poll_thread(void *data)
        int requests;
        struct ap_device *ap_dev;
 
-       set_user_nice(current, 19);
+       set_user_nice(current, MAX_NICE);
        while (1) {
                if (ap_suspend_flag)
                        return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1d41f4b9114f8253e780d279799dad0ac0d27e04..f548430234663691b80f3e50ab94e2088bdf25ad 100644
@@ -464,7 +464,7 @@ static int bnx2fc_l2_rcv_thread(void *arg)
        struct fcoe_percpu_s *bg = arg;
        struct sk_buff *skb;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
@@ -602,7 +602,7 @@ int bnx2fc_percpu_io_thread(void *arg)
        struct bnx2fc_work *work, *tmp;
        LIST_HEAD(work_list);
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index b5ffd280a1aefeab817953a506484c033066ae95..d6d491c2f00463c4f3d9f32a900e7b96cc9803b3 100644
@@ -1870,7 +1870,7 @@ int bnx2i_percpu_io_thread(void *arg)
        struct bnx2i_work *work, *tmp;
        LIST_HEAD(work_list);
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        while (!kthread_should_stop()) {
                spin_lock_bh(&p->p_work_lock);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index d5e105b173f0cf121894fcb5105a5afedadc16d5..00ee0ed642aac717fd8c0b1e2976c860ccb664ff 100644
@@ -1872,7 +1872,7 @@ static int fcoe_percpu_receive_thread(void *arg)
 
        skb_queue_head_init(&tmp);
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
 retry:
        while (!kthread_should_stop()) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 23f5ba5e6472581d4aa69bef39d1ca25992502d4..8dd47689d58430a1147a389c1eeed1eea1afae1d 100644
@@ -4515,7 +4515,7 @@ static int ibmvfc_work(void *data)
        struct ibmvfc_host *vhost = data;
        int rc;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        while (1) {
                rc = wait_event_interruptible(vhost->work_wait_q,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fa764406df6872c2daa9fa3d2e75a9e5f2164c8b..2ebfb2bb0f425f78975ac2c678c9130834d500b0 100644
@@ -2213,7 +2213,7 @@ static int ibmvscsi_work(void *data)
        struct ibmvscsi_host_data *hostdata = data;
        int rc;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        while (1) {
                rc = wait_event_interruptible(hostdata->work_wait_q,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 59b51c529ba0f9666a7200928ea7b330626daac7..294c072e90835efbb72012f2789dc412bf8c9686 100644
@@ -731,7 +731,7 @@ lpfc_do_work(void *p)
        struct lpfc_hba *phba = p;
        int rc;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
        current->flags |= PF_NOFREEZE;
        phba->data_flags = 0;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 19e99cc33724c526f30de23d0e98d0757cf2f99d..afc84814e9bb3b5db7682c4e5b7751edd8b051e4 100644
@@ -4828,7 +4828,7 @@ qla2x00_do_dpc(void *data)
        ha = (struct qla_hw_data *)data;
        base_vha = pci_get_drvdata(ha->pdev);
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index cfe4bc8f05cb82ec83b4e127ba53f8e72968a85c..179b21b665042c6efd1d32df435cb1e5cea326e8 100644
@@ -441,7 +441,7 @@ static void binder_set_nice(long nice)
                     "%d: nice value %ld not allowed use %ld instead\n",
                      current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
-       if (min_nice < 20)
+       if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 }
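[Note: binder_set_nice() clamps a requested nice value to the caller's RLIMIT_NICE ceiling. The rlimit encodes nice as 20 - nice, so a ceiling of rlim_cur permits nice values down to 20 - rlim_cur; with the limit unset (rlim_cur = 0) that floor is 20, outside the valid range, which triggers the "RLIMIT_NICE not set" complaint. Rewriting min_nice < 20 as min_nice <= MAX_NICE is behaviorally identical but self-documenting. A userspace model of the arithmetic; the kernel wraps it in rlimit_to_nice()/nice_to_rlimit() helpers, whose availability at this point in the tree is an assumption:]

    /*
     * Userspace model of the RLIMIT_NICE encoding behind binder_set_nice().
     * The rlimit stores "20 - nice": a ceiling of 40 permits nice -20,
     * a ceiling of 1 permits only nice 19.  Helper names are assumptions;
     * the arithmetic is the contract.
     */
    #include <stdio.h>

    #define MAX_NICE 19

    static long rlimit_to_nice(long rlim) { return MAX_NICE - rlim + 1; }
    static long nice_to_rlimit(long nice) { return MAX_NICE - nice + 1; }

    static long binder_clamp_nice(long requested, long rlim_cur)
    {
            long min_nice = rlimit_to_nice(rlim_cur); /* lowest nice allowed */

            if (requested >= min_nice)
                    return requested;       /* within the caller's limit */
            return min_nice;                /* clamp, as binder does     */
    }

    int main(void)
    {
            /* rlim_cur = 11 -> min_nice = 9: a request for -5 is clamped. */
            printf("%ld %ld\n", binder_clamp_nice(-5, 11),
                   nice_to_rlimit(-20));    /* prints: 9 40 */
            return 0;
    }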
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index f78eda235c7ab43a561efb21cfc0b4c50be4bcbe..d4c2cd91add41272b815be7de32ccecc27d6d887 100644
@@ -407,7 +407,7 @@ static int loop_thread(void *data)
        int refcheck;
        int ret = 0;
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        lo->lo_state = LLOOP_BOUND;
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bf482dfed14fecf17406a6aa2d517929d6834800..73039295d0d1f35205e060220521934afd33ff27 100644
@@ -1107,7 +1107,7 @@ static int o2hb_thread(void *data)
 
        mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
 
-       set_user_nice(current, -20);
+       set_user_nice(current, MIN_NICE);
 
        /* Pin node */
        o2nm_depend_this_node();
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index fddbe2023a5d568717b3b90eabc692bb4612f9bd..cb0cec94fda3330c63bf233b380bbe9ee81fe2b6 100644
@@ -104,20 +104,6 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 #define test_thread_flag(flag) \
        test_ti_thread_flag(current_thread_info(), flag)
 
-static inline __deprecated void set_need_resched(void)
-{
-       /*
-        * Use of this function in deprecated.
-        *
-        * As of this writing there are only a few users in the DRM tree left
-        * all of which are wrong and can be removed without causing too much
-        * grief.
-        *
-        * The DRM people are aware and are working on removing the last few
-        * instances.
-        */
-}
-
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
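[Note: the deleted set_need_resched() was already an empty stub kept only to warn off new callers, and it can go once the remaining users are gone (the removed comment points at the DRM tree). The surviving query side is tif_need_resched() above, a plain test of TIF_NEED_RESCHED in the current thread's flag word. A userspace model of that bit test; the bit number is arch-specific and 3 is only a placeholder:]

    /* Userspace model of the thread-flag test behind tif_need_resched(). */
    #include <stdio.h>

    #define TIF_NEED_RESCHED 3              /* placeholder bit number */

    struct thread_info { unsigned long flags; };

    static int test_ti_thread_flag(struct thread_info *ti, int flag)
    {
            return (ti->flags >> flag) & 1;
    }

    int main(void)
    {
            struct thread_info ti = { .flags = 1UL << TIF_NEED_RESCHED };

            printf("%d\n", test_ti_thread_flag(&ti, TIF_NEED_RESCHED)); /* 1 */
            return 0;
    }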
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index f26b1a18e34e03cb0582fbf2de16d008f9731d89..23343be46e91c16068a1131cc893acb40ae5b5d2 100644
@@ -216,7 +216,7 @@ static int lock_torture_writer(void *arg)
        static DEFINE_TORTURE_RANDOM(rand);
 
        VERBOSE_TOROUT_STRING("lock_torture_writer task started");
-       set_user_nice(current, 19);
+       set_user_nice(current, MAX_NICE);
 
        do {
                schedule_timeout_uninterruptible(1);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fdb96de81a5b8a92c302961769cdefdb5cad915..5d859ec975c2caef20530e0cea4410d37a186f57 100644
@@ -5564,6 +5564,7 @@ static unsigned long scale_rt_power(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        u64 total, available, age_stamp, avg;
+       s64 delta;
 
        /*
         * Since we're reading these variables without serialization make sure
@@ -5572,7 +5573,11 @@ static unsigned long scale_rt_power(int cpu)
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
 
-       total = sched_avg_period() + (rq_clock(rq) - age_stamp);
+       delta = rq_clock(rq) - age_stamp;
+       if (unlikely(delta < 0))
+               delta = 0;
+
+       total = sched_avg_period() + delta;
 
        if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
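[Note: age_stamp and rq_clock() are sampled without serialization (per the comment above), so the clock sample can briefly lag age_stamp. Computed in u64 arithmetic, that negative difference wraps to a value near 2^64 and grossly inflates total, skewing the scaling. Doing the subtraction in s64 and clamping at zero bounds the error. A self-contained demonstration of the wrap:]

    /*
     * Why the scale_rt_power() fix computes the clock delta in s64:
     * an unsigned subtraction that "goes negative" wraps instead.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t clock = 1000, age_stamp = 1010;  /* clock lags by 10 */

            uint64_t wrapped = clock - age_stamp;     /* 2^64 - 10: garbage */
            int64_t delta = (int64_t)(clock - age_stamp);
            if (delta < 0)
                    delta = 0;                        /* the fix: clamp */

            printf("wrapped=%llu clamped=%lld\n",
                   (unsigned long long)wrapped, (long long)delta);
            return 0;     /* wrapped=18446744073709551606 clamped=0 */
    }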
@@ -6727,10 +6732,7 @@ static int idle_balance(struct rq *this_rq)
 
 out:
        /* Is there a task of a high priority class? */
-       if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-           ((this_rq->stop && this_rq->stop->on_rq) ||
-            this_rq->dl.dl_nr_running ||
-            (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+       if (this_rq->nr_running != this_rq->cfs.h_nr_running)
                pulled_task = -1;
 
        if (pulled_task) {
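[Note: this simplification leans on the rt.c accounting below. rq->nr_running now counts runnable tasks of every class, with RT tasks included only while their runqueue is not throttled, while cfs.h_nr_running counts just the CFS ones; the inequality alone therefore implies a queued stop, deadline, or non-throttled RT task, and the explicit per-class probes become redundant. A sketch of the invariant:]

    /*
     * Model of the invariant idle_balance() now relies on: once throttled
     * RT tasks are subtracted from the global count (see kernel/sched/rt.c
     * below), nr_running != cfs_nr_running alone means some non-CFS task
     * is runnable.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int cfs_nr_running = 2;
            unsigned int rt_nr_running = 1;   /* queued, not throttled */
            unsigned int nr_running = cfs_nr_running + rt_nr_running;

            if (nr_running != cfs_nr_running)
                    printf("higher-class task present: retry pick\n");
            return 0;
    }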
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bd2267ad404fa78de092b8bc8f228cfea2dca87b..7795e292f4c94d77bf1039d7143eed998914e664 100644
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
 #endif
+       /* We start in dequeued state, because no RT tasks are queued */
+       rt_rq->rt_queued = 0;
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
@@ -112,6 +114,13 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
        return rt_se->rt_rq;
 }
 
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = rt_se->rt_rq;
+
+       return rt_rq->rq;
+}
+
 void free_rt_sched_group(struct task_group *tg)
 {
        int i;
@@ -211,10 +220,16 @@ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
        return container_of(rt_rq, struct rq, rt);
 }
 
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 {
        struct task_struct *p = rt_task_of(rt_se);
-       struct rq *rq = task_rq(p);
+
+       return task_rq(p);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       struct rq *rq = rq_of_rt_se(rt_se);
 
        return &rq->rt;
 }
@@ -391,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
        return !list_empty(&rt_se->run_list);
@@ -452,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
        rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
-               if (rt_se && !on_rt_rq(rt_se))
+               if (!rt_se)
+                       enqueue_top_rt_rq(rt_rq);
+               else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
+
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
@@ -466,10 +487,17 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (rt_se && on_rt_rq(rt_se))
+       if (!rt_se)
+               dequeue_top_rt_rq(rt_rq);
+       else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -532,12 +560,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-       if (rt_rq->rt_nr_running)
-               resched_task(rq_of_rt_rq(rt_rq)->curr);
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       if (!rt_rq->rt_nr_running)
+               return;
+
+       enqueue_top_rt_rq(rt_rq);
+       resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+       dequeue_top_rt_rq(rt_rq);
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -922,6 +961,38 @@ static void update_curr_rt(struct rq *rq)
        }
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (!rt_rq->rt_queued)
+               return;
+
+       BUG_ON(!rq->nr_running);
+
+       rq->nr_running -= rt_rq->rt_nr_running;
+       rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (rt_rq->rt_queued)
+               return;
+       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+               return;
+
+       rq->nr_running += rt_rq->rt_nr_running;
+       rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
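[Note: these two helpers are the core of the series: the root rt_rq contributes its whole task count to rq->nr_running as a single block, and only while it is actually runnable (non-empty and not throttled). Throttling removes the block, unthrottling re-adds it, and the per-task inc_nr_running()/dec_nr_running() calls deleted from enqueue_task_rt()/dequeue_task_rt() further down become unnecessary. A compact userspace model, assuming a single root runqueue:]

    /*
     * Userspace model of rt_queued accounting: the root rt_rq's task count
     * enters rq->nr_running as one block, gated on throttling.
     */
    #include <assert.h>
    #include <stdio.h>

    struct rt_rq { unsigned int rt_nr_running; int rt_throttled; int rt_queued; };
    struct rq   { unsigned int nr_running; struct rt_rq rt; };

    static void dequeue_top_rt_rq(struct rq *rq)
    {
            if (!rq->rt.rt_queued)
                    return;
            rq->nr_running -= rq->rt.rt_nr_running;
            rq->rt.rt_queued = 0;
    }

    static void enqueue_top_rt_rq(struct rq *rq)
    {
            if (rq->rt.rt_queued)
                    return;
            if (rq->rt.rt_throttled || !rq->rt.rt_nr_running)
                    return;
            rq->nr_running += rq->rt.rt_nr_running;
            rq->rt.rt_queued = 1;
    }

    int main(void)
    {
            struct rq rq = { .nr_running = 3 };     /* 3 CFS tasks        */

            rq.rt.rt_nr_running = 2;                /* 2 RT tasks arrive  */
            enqueue_top_rt_rq(&rq);
            assert(rq.nr_running == 5);

            rq.rt.rt_throttled = 1;                 /* quota exhausted    */
            dequeue_top_rt_rq(&rq);
            assert(rq.nr_running == 3);             /* RT block hidden    */

            rq.rt.rt_throttled = 0;                 /* quota refreshed    */
            enqueue_top_rt_rq(&rq);
            assert(rq.nr_running == 5);

            printf("ok\n");
            return 0;
    }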
@@ -1044,13 +1115,24 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+static inline
+unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *group_rq = group_rt_rq(rt_se);
+
+       if (group_rq)
+               return group_rq->rt_nr_running;
+       else
+               return 1;
+}
+
 static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        int prio = rt_se_prio(rt_se);
 
        WARN_ON(!rt_prio(prio));
-       rt_rq->rt_nr_running++;
+       rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
 
        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
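[Note: with group scheduling, a scheduling entity can stand for an entire child runqueue, so inc_rt_tasks()/dec_rt_tasks() now add the child's whole rt_nr_running rather than 1. The root rt_rq thus counts every RT task in the hierarchy, which is exactly the quantity enqueue_top_rt_rq() folds into rq->nr_running. A sketch of the hierarchical count:]

    /*
     * Sketch: a group entity contributes its child runqueue's whole task
     * count, so the root's rt_nr_running totals the hierarchy.
     */
    #include <stdio.h>

    struct rt_rq { unsigned int rt_nr_running; };

    /* NULL group_rq means "this entity is a single task". */
    static unsigned int rt_se_nr_running(struct rt_rq *group_rq)
    {
            return group_rq ? group_rq->rt_nr_running : 1;
    }

    int main(void)
    {
            struct rt_rq child = { .rt_nr_running = 3 }; /* 3 tasks in group */
            struct rt_rq root = { 0 };

            root.rt_nr_running += rt_se_nr_running(&child); /* group entity */
            root.rt_nr_running += rt_se_nr_running(NULL);   /* plain task   */
            printf("root sees %u RT tasks\n", root.rt_nr_running); /* 4 */
            return 0;
    }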
@@ -1062,7 +1144,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
-       rt_rq->rt_nr_running--;
+       rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
 
        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
@@ -1119,6 +1201,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
                back = rt_se;
        }
 
+       dequeue_top_rt_rq(rt_rq_of_se(back));
+
        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
@@ -1127,13 +1211,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
 
        for_each_sched_rt_entity(rt_se) {
@@ -1142,6 +1231,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1159,8 +1249,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
-
-       inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1171,8 +1259,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
        dequeue_rt_entity(rt_se);
 
        dequeue_pushable_task(rq, p);
-
-       dec_nr_running(rq);
 }
 
 /*
@@ -1377,10 +1463,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
        if (prev->sched_class == &rt_sched_class)
                update_curr_rt(rq);
 
-       if (!rt_rq->rt_nr_running)
-               return NULL;
-
-       if (rt_rq_throttled(rt_rq))
+       if (!rt_rq->rt_queued)
                return NULL;
 
        put_prev_task(rq, prev);
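[Note: rt_queued is maintained to be set exactly when the rt_rq is non-empty and not throttled, so this single flag test replaces the old pair of checks. Stated as an invariant, ignoring rt_nr_boosted, which the RT_GROUP_SCHED variant of rt_rq_throttled() also consults:]

    /*
     * The invariant pick_next_task_rt() now leans on: rt_queued mirrors
     * "runnable and not throttled".
     */
    #include <assert.h>

    struct rt_rq { unsigned int rt_nr_running; int rt_throttled; int rt_queued; };

    static void check(const struct rt_rq *rt_rq)
    {
            assert(rt_rq->rt_queued ==
                   (rt_rq->rt_nr_running > 0 && !rt_rq->rt_throttled));
    }

    int main(void)
    {
            struct rt_rq a = { .rt_nr_running = 2, .rt_throttled = 0,
                               .rt_queued = 1 };
            struct rt_rq b = { .rt_nr_running = 2, .rt_throttled = 1,
                               .rt_queued = 0 };

            check(&a);
            check(&b);
            return 0;
    }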
@@ -1892,9 +1975,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
         */
        if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (rq->rt.overloaded && push_rt_task(rq) &&
+               if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
                    /* Don't resched if we changed runqueues */
-                   rq != task_rq(p))
+                   push_rt_task(rq) && rq != task_rq(p))
                        check_resched = 0;
 #endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
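[Note: the rewritten condition front-loads the cheap, side-effect-free tests: a task pinned to a single CPU (nr_cpus_allowed == 1) can never be pushed, so push_rt_task(), which may take other runqueues' locks, is no longer attempted for it; C's short-circuit evaluation guarantees the ordering. A minimal illustration of the pattern, with hypothetical stand-in functions:]

    /*
     * Pattern used by the switched_to_rt() fix: cheap eligibility tests
     * go first so the expensive, side-effecting call is skipped entirely.
     */
    #include <stdio.h>

    static int push_attempts;

    static int push_rt_task(void)           /* stand-in: expensive */
    {
            push_attempts++;
            return 1;
    }

    static void maybe_push(int nr_cpus_allowed, int overloaded)
    {
            if (nr_cpus_allowed > 1 && overloaded && push_rt_task())
                    printf("pushed\n");
    }

    int main(void)
    {
            maybe_push(1, 1);                       /* pinned: no attempt  */
            maybe_push(4, 1);                       /* eligible: attempted */
            printf("attempts=%d\n", push_attempts); /* attempts=1 */
            return 0;
    }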
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 456e492a3dca37c13d7cb7b57a51965bfa18d6b3..b2cbe81308afe79c40944196f4fc398895bc676e 100644
@@ -409,6 +409,8 @@ struct rt_rq {
        int overloaded;
        struct plist_head pushable_tasks;
 #endif
+       int rt_queued;
+
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;
@@ -423,18 +425,6 @@ struct rt_rq {
 #endif
 };
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-#else
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled;
-}
-#endif
-
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ee63af30bd14a4ad7f4b8f846d19b100fd596b3..c30c01b32ecea6d9fea573551bb1b71fb6e543d2 100644
@@ -100,10 +100,10 @@ enum {
 
        /*
         * Rescue workers are used only on emergencies and shared by
-        * all cpus.  Give -20.
+        * all cpus.  Give MIN_NICE.
         */
-       RESCUER_NICE_LEVEL      = -20,
-       HIGHPRI_NICE_LEVEL      = -20,
+       RESCUER_NICE_LEVEL      = MIN_NICE,
+       HIGHPRI_NICE_LEVEL      = MIN_NICE,
 
        WQ_NAME_LEN             = 24,
 };
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b4b1feba64724234dee1b66a482c79a0cd3c0f95..d199d2d919467eeddbc82127f0ffbf780e947138 100644
@@ -2740,7 +2740,7 @@ static int khugepaged(void *none)
        struct mm_slot *mm_slot;
 
        set_freezable();
-       set_user_nice(current, 19);
+       set_user_nice(current, MAX_NICE);
 
        while (!kthread_should_stop()) {
                khugepaged_do_scan();