sched/core: Provide a pointer to the valid CPU mask
author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
           Tue, 23 Apr 2019 14:26:36 +0000 (16:26 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 3 Jun 2019 09:49:37 +0000 (11:49 +0200)
In commit:

  4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper")

the tsk_nr_cpus_allowed() wrapper was removed. There was not much
difference in !RT, but in RT we used this wrapper to implement
migrate_disable(). Within a migrate_disable() section the CPU mask is
restricted to a single CPU while the "normal" CPU mask remains untouched.

As an alternative implementation, Ingo suggested using:

	struct task_struct {
		const cpumask_t		*cpus_ptr;
		cpumask_t		cpus_mask;
	};

with

	t->cpus_ptr = &t->cpus_mask;

In -RT we then can switch the cpus_ptr to:

	t->cpus_ptr = cpumask_of(task_cpu(p));

in a migration-disabled region. The rules are simple:

 - Code that 'uses' ->cpus_allowed would use the pointer.
 - Code that 'modifies' ->cpus_allowed would use the direct mask.
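
For illustration, here is a minimal sketch of how an -RT
migrate_disable()/migrate_enable() pair could flip the pointer. It is
not part of this patch (which only introduces ->cpus_ptr/->cpus_mask
and converts the users and modifiers listed below); the per-task
nesting counter migrate_disable_cnt is a hypothetical field used only
for this sketch:

	#include <linux/cpumask.h>
	#include <linux/preempt.h>
	#include <linux/sched.h>
	#include <linux/smp.h>

	/*
	 * Hypothetical -RT sketch, not part of this patch: only the
	 * pointer is redirected; ->cpus_mask (what the scheduler core
	 * and user space modify) stays untouched, so the full affinity
	 * is visible again after migrate_enable().
	 */
	static inline void migrate_disable(void)
	{
		struct task_struct *p = current;

		if (p->migrate_disable_cnt++)	/* assumed nesting counter */
			return;

		preempt_disable();
		/* Readers of ->cpus_ptr now see only the current CPU. */
		p->cpus_ptr = cpumask_of(smp_processor_id());
		p->nr_cpus_allowed = 1;
		preempt_enable();
	}

	static inline void migrate_enable(void)
	{
		struct task_struct *p = current;

		if (--p->migrate_disable_cnt)
			return;

		preempt_disable();
		/* Point back at the full, unmodified affinity mask. */
		p->cpus_ptr = &p->cpus_mask;
		p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
		preempt_enable();
	}

With such a split, 'users' like is_cpu_allowed() or select_fallback_rq()
automatically honour the pinning through ->cpus_ptr, while 'modifiers'
like set_cpus_allowed_common() keep writing ->cpus_mask, matching the
two rules above.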

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190423142636.14347-1-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
23 files changed:
arch/ia64/kernel/mca.c
arch/mips/include/asm/switch_to.h
arch/mips/kernel/mips-mt-fpaff.c
arch/mips/kernel/traps.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/qib/qib_file_ops.c
fs/proc/array.c
include/linux/sched.h
init/init_task.c
kernel/cgroup/cpuset.c
kernel/fork.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/trace/trace_hwlat.c
lib/smp_processor_id.c
samples/trace_events/trace-events-sample.c

diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6a52d761854b525e2d32cb66c600d4241c03004c..79190d877fa700c87d7b6c92eb47f5ac72c09ad4 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
        ti->cpu = cpu;
        p->stack = ti;
        p->state = TASK_UNINTERRUPTIBLE;
-       cpumask_set_cpu(cpu, &p->cpus_allowed);
+       cpumask_set_cpu(cpu, &p->cpus_mask);
        INIT_LIST_HEAD(&p->tasks);
        p->parent = p->real_parent = p->group_leader = p;
        INIT_LIST_HEAD(&p->children);
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 0f813bb753c6321de009affcaea2b44703b8cd88..09cbe9042828fea136c5b0d74b834b0e55712fb8 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
  *
  * We're not calling set_cpus_allowed() here, because we have no need to
  * force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@ do {                                                                  \
            test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&             \
            (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
                clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);          \
-               prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
+               prev->cpus_mask = prev->thread.user_cpus_allowed;       \
        }                                                               \
        next->thread.emulated_fp = 0;                                   \
 } while(0)
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index a7c0f97e4b0d6eea56db2f9eedad24a9158d669b..1a08428eedcf1d3b1c933d5996a4760468fc4348 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
        if (retval)
                goto out_unlock;
 
-       cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+       cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
        cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c52766a5b85f5ce8ac10bc104ac77354e1eb2885..ac7159263da071c393f3b8da886ea6b963484f5f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -891,12 +891,12 @@ static void mt_ase_fp_affinity(void)
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
-               if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
+               if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
                        cpumask_t tmask;
 
                        current->thread.user_cpus_allowed
-                               = current->cpus_allowed;
-                       cpumask_and(&tmask, &current->cpus_allowed,
+                               = current->cpus_mask;
+                       cpumask_and(&tmask, &current->cpus_mask,
                                    &mt_fpu_cpumask);
                        set_cpus_allowed_ptr(current, &tmask);
                        set_thread_flag(TIF_FPUBOUND);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index e56b553de27b8460c929a07d49f3423c8bdc4a55..f18d5067cd0fa96df0f2d470354491bd55a0ac02 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -128,7 +128,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
         * runqueue. The context will be rescheduled on the proper node
         * if it is timesliced or preempted.
         */
-       cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
+       cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
 
        /* Save the current cpu id for spu interrupt routing. */
        ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 604c0e3bcc830612fb505efe1a677ee0df7aac4e..f68baccc69f0abd9691e07181d972cf9d2fd1091 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1503,7 +1503,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
         * may be scheduled elsewhere and invalidate entries in the
         * pseudo-locked region.
         */
-       if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+       if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 4fe662c3bbc1cda401e95cf90afe366554e471c2..c142b23bb40183d61bc009a1904b65768bac711c 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node)
        struct hfi1_affinity_node *entry;
        cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
        const struct cpumask *node_mask,
-               *proc_mask = &current->cpus_allowed;
+               *proc_mask = current->cpus_ptr;
        struct hfi1_affinity_node_list *affinity = &node_affinity;
        struct cpu_mask_set *set = &affinity->proc;
 
@@ -1046,7 +1046,7 @@ int hfi1_get_proc_affinity(int node)
         * check whether process/context affinity has already
         * been set
         */
-       if (cpumask_weight(proc_mask) == 1) {
+       if (current->nr_cpus_allowed == 1) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
@@ -1057,7 +1057,7 @@ int hfi1_get_proc_affinity(int node)
                cpu = cpumask_first(proc_mask);
                cpumask_set_cpu(cpu, &set->used);
                goto done;
-       } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+       } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b0110728f5418bb158b2af2ca0c7e440d24c3274..7e8139ee0cc1bdcaf49971b6b926d3cea045afbf 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -855,14 +855,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
        struct sdma_rht_node *rht_node;
        struct sdma_engine *sde = NULL;
-       const struct cpumask *current_mask = &current->cpus_allowed;
        unsigned long cpu_id;
 
        /*
         * To ensure that always the same sdma engine(s) will be
         * selected make sure the process is pinned to this CPU only.
         */
-       if (cpumask_weight(current_mask) != 1)
+       if (current->nr_cpus_allowed != 1)
                goto out;
 
        cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 78fa634de98a3db4a16e345007ee28bee0cf9c42..27b6e664e59de7e5c1465ac2384c2b862a20a425 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
 {
        struct qib_filedata *fd = fp->private_data;
-       const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+       const unsigned int weight = current->nr_cpus_allowed;
        const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
        int local_cpu;
 
@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
                ret = find_free_ctxt(i_minor - 1, fp, uinfo);
        else {
                int unit;
-               const unsigned int cpu = cpumask_first(&current->cpus_allowed);
-               const unsigned int weight =
-                       cpumask_weight(&current->cpus_allowed);
+               const unsigned int cpu = cpumask_first(current->cpus_ptr);
+               const unsigned int weight = current->nr_cpus_allowed;
 
                if (weight == 1 && !test_bit(cpu, qib_cpulist))
                        if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2edbb657f859bff9528eff4b83dc14da121b17e0..84908556ea58676996bd885141e078517dcb4d38 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
        seq_printf(m, "Cpus_allowed:\t%*pb\n",
-                  cpumask_pr_args(&task->cpus_allowed));
+                  cpumask_pr_args(task->cpus_ptr));
        seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
-                  cpumask_pr_args(&task->cpus_allowed));
+                  cpumask_pr_args(task->cpus_ptr));
 }
 
 static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 11837410690f01d72712287068953bd8729d464a..1b2590a8d038687016bee941218a4403c621d46e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -651,7 +651,8 @@ struct task_struct {
 
        unsigned int                    policy;
        int                             nr_cpus_allowed;
-       cpumask_t                       cpus_allowed;
+       const cpumask_t                 *cpus_ptr;
+       cpumask_t                       cpus_mask;
 
 #ifdef CONFIG_PREEMPT_RCU
        int                             rcu_read_lock_nesting;
@@ -1399,7 +1400,7 @@ extern struct pid *cad_pid;
 #define PF_SWAPWRITE           0x00800000      /* Allowed to write to swap */
 #define PF_MEMSTALL            0x01000000      /* Stalled due to lack of memory */
 #define PF_UMH                 0x02000000      /* I'm an Usermodehelper process */
-#define PF_NO_SETAFFINITY      0x04000000      /* Userland is not allowed to meddle with cpus_allowed */
+#define PF_NO_SETAFFINITY      0x04000000      /* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY           0x08000000      /* Early kill for mce process policy */
 #define PF_MEMALLOC_NOCMA      0x10000000      /* All allocation request will have _GFP_MOVABLE cleared */
 #define PF_FREEZER_SKIP                0x40000000      /* Freezer should not count it as freezable */
diff --git a/init/init_task.c b/init/init_task.c
index c70ef656d0f40cf4fc603d0a7dbb70ba1820af73..3c27c0efa31663c6baa0f49387cc4df7887a8369 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -72,7 +72,8 @@ struct task_struct init_task
        .static_prio    = MAX_PRIO - 20,
        .normal_prio    = MAX_PRIO - 20,
        .policy         = SCHED_NORMAL,
-       .cpus_allowed   = CPU_MASK_ALL,
+       .cpus_ptr       = &init_task.cpus_mask,
+       .cpus_mask      = CPU_MASK_ALL,
        .nr_cpus_allowed= NR_CPUS,
        .mm             = NULL,
        .active_mm      = &init_mm,
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6a1942ed781c5b1712021299fb8227d389126c00..fe90fa1899e6a6c7253c2553ee1e978418154227 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2829,7 +2829,7 @@ static void cpuset_fork(struct task_struct *task)
        if (task_css_is_root(task, cpuset_cgrp_id))
                return;
 
-       set_cpus_allowed_ptr(task, &current->cpus_allowed);
+       set_cpus_allowed_ptr(task, current->cpus_ptr);
        task->mems_allowed = current->mems_allowed;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6dfd36066336c384e007de312f54004..6be686283e55b2f8555e72e661f2230ff48abc09 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -894,6 +894,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_STACKPROTECTOR
        tsk->stack_canary = get_random_canary();
 #endif
+       if (orig->cpus_ptr == &orig->cpus_mask)
+               tsk->cpus_ptr = &tsk->cpus_mask;
 
        /*
         * One for us, one for whoever does the "release_task()" (usually
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427742a918e0c91ba6471b76fa0580e3638c..93ab85f0d07673846a1daadbc081d7a1276f6ef6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -930,7 +930,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
-       if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+       if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                return false;
 
        if (is_per_cpu_kthread(p))
@@ -1025,7 +1025,7 @@ static int migration_cpu_stop(void *data)
        local_irq_disable();
        /*
         * We need to explicitly wake pending tasks before running
-        * __migrate_task() such that we will not miss enforcing cpus_allowed
+        * __migrate_task() such that we will not miss enforcing cpus_ptr
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
        sched_ttwu_pending();
@@ -1056,7 +1056,7 @@ static int migration_cpu_stop(void *data)
  */
 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
-       cpumask_copy(&p->cpus_allowed, new_mask);
+       cpumask_copy(&p->cpus_mask, new_mask);
        p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
@@ -1126,7 +1126,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                goto out;
        }
 
-       if (cpumask_equal(&p->cpus_allowed, new_mask))
+       if (cpumask_equal(p->cpus_ptr, new_mask))
                goto out;
 
        if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
@@ -1286,10 +1286,10 @@ static int migrate_swap_stop(void *data)
        if (task_cpu(arg->src_task) != arg->src_cpu)
                goto unlock;
 
-       if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
+       if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
                goto unlock;
 
-       if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
+       if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
                goto unlock;
 
        __migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1331,10 +1331,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
        if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
                goto out;
 
-       if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
+       if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
                goto out;
 
-       if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
+       if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
                goto out;
 
        trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1479,7 +1479,7 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 
 /*
- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
  *
  * A few notes on cpu_active vs cpu_online:
  *
@@ -1519,14 +1519,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                for_each_cpu(dest_cpu, nodemask) {
                        if (!cpu_active(dest_cpu))
                                continue;
-                       if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
                                return dest_cpu;
                }
        }
 
        for (;;) {
                /* Any allowed, online CPU? */
-               for_each_cpu(dest_cpu, &p->cpus_allowed) {
+               for_each_cpu(dest_cpu, p->cpus_ptr) {
                        if (!is_cpu_allowed(p, dest_cpu))
                                continue;
 
@@ -1570,7 +1570,7 @@ out:
 }
 
 /*
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
  */
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
@@ -1580,11 +1580,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
        if (p->nr_cpus_allowed > 1)
                cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
        else
-               cpu = cpumask_any(&p->cpus_allowed);
+               cpu = cpumask_any(p->cpus_ptr);
 
        /*
         * In order not to call set_task_cpu() on a blocking task we need
-        * to rely on ttwu() to place the task on a valid ->cpus_allowed
+        * to rely on ttwu() to place the task on a valid ->cpus_ptr
         * CPU.
         *
         * Since this is common to all placement strategies, this lives here.
@@ -2395,7 +2395,7 @@ void wake_up_new_task(struct task_struct *p)
 #ifdef CONFIG_SMP
        /*
         * Fork balancing, do it here and not earlier because:
-        *  - cpus_allowed can change in the fork path
+        *  - cpus_ptr can change in the fork path
         *  - any previously selected CPU might disappear through hotplug
         *
         * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -4267,7 +4267,7 @@ change:
                         * the entire root_domain to become SCHED_DEADLINE. We
                         * will also fail if there's no bandwidth available.
                         */
-                       if (!cpumask_subset(span, &p->cpus_allowed) ||
+                       if (!cpumask_subset(span, p->cpus_ptr) ||
                            rq->rd->dl_bw.bw == 0) {
                                task_rq_unlock(rq, p, &rf);
                                return -EPERM;
@@ -4866,7 +4866,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
                goto out_unlock;
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
-       cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+       cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
@@ -5443,7 +5443,7 @@ int task_can_attach(struct task_struct *p,
         * allowed nodes is unnecessary.  Thus, cpusets are not
         * applicable for such threads.  This prevents checking for
         * success of set_cpus_allowed_ptr() on all attached tasks
-        * before cpus_allowed may be changed.
+        * before cpus_mask may be changed.
         */
        if (p->flags & PF_NO_SETAFFINITY) {
                ret = -EINVAL;
@@ -5470,7 +5470,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
        if (curr_cpu == target_cpu)
                return 0;
 
-       if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
+       if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
                return -EINVAL;
 
        /* TODO: This is not properly updating schedstats */
@@ -5608,7 +5608,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
                put_prev_task(rq, next);
 
                /*
-                * Rules for changing task_struct::cpus_allowed are holding
+                * Rules for changing task_struct::cpus_mask are holding
                 * both pi_lock and rq->lock, such that holding either
                 * stabilizes the mask.
                 *
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 50316455ea66a9e46aa1465272b8cf7f6174ba46..d57fb2f8ae67bf167f231473207e06ce81709157 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
        const struct sched_dl_entity *dl_se = &p->dl;
 
        if (later_mask &&
-           cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+           cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
                return 1;
        } else {
                int best_cpu = cpudl_maximum(cp);
 
                WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
-               if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+               if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
                    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                        if (later_mask)
                                cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb1e9349e716e9d97359d0c7258519..f7d2c10b4c92e956fb6e431572e47b50edcc4490 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                if (skip)
                        continue;
 
-               if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+               if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
                        continue;
 
                if (lowest_mask) {
-                       cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+                       cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 
                        /*
                         * We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 43901fa3f26932d334f34f4b474a3cb55821513a..c1ef308610685a67f1bf27d3fb52f42881b21316 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
-               cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+               cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           cpumask_test_cpu(cpu, &p->cpus_allowed))
+           cpumask_test_cpu(cpu, p->cpus_ptr))
                return 1;
        return 0;
 }
@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                /* Retry if something changed. */
                if (double_lock_balance(rq, later_rq)) {
                        if (unlikely(task_rq(task) != rq ||
-                                    !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
+                                    !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
                                     task_running(rq, task) ||
                                     !dl_task(task) ||
                                     !task_on_rq_queued(task))) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f35930f5e528a8e1ca8e5f8ed5a6556c86a54701..8691a8fffe402fccdb37f3dfdc92ab43cdcf66cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1621,7 +1621,7 @@ static void task_numa_compare(struct task_numa_env *env,
         * be incurred if the tasks were swapped.
         */
        /* Skip this swap candidate if cannot move to the source cpu */
-       if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+       if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
                goto unlock;
 
        /*
@@ -1718,7 +1718,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 
        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                /* Skip this CPU if the source task cannot migrate */
-               if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+               if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
                        continue;
 
                env->dst_cpu = cpu;
@@ -5831,7 +5831,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                /* Skip over this group if it has no CPUs allowed */
                if (!cpumask_intersects(sched_group_span(group),
-                                       &p->cpus_allowed))
+                                       p->cpus_ptr))
                        continue;
 
                local_group = cpumask_test_cpu(this_cpu,
@@ -5963,7 +5963,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
                return cpumask_first(sched_group_span(group));
 
        /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+       for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
                if (available_idle_cpu(i)) {
                        struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
@@ -6003,7 +6003,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 {
        int new_cpu = cpu;
 
-       if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+       if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
                return prev_cpu;
 
        /*
@@ -6120,7 +6120,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
        if (!test_idle_cores(target, false))
                return -1;
 
-       cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+       cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
        for_each_cpu_wrap(core, cpus, target) {
                bool idle = true;
@@ -6154,7 +6154,7 @@ static int select_idle_smt(struct task_struct *p, int target)
                return -1;
 
        for_each_cpu(cpu, cpu_smt_mask(target)) {
-               if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+               if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                        continue;
                if (available_idle_cpu(cpu))
                        return cpu;
@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                if (!--nr)
                        return -1;
-               if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+               if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                        continue;
                if (available_idle_cpu(cpu))
                        break;
@@ -6254,7 +6254,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
            recent_used_cpu != target &&
            cpus_share_cache(recent_used_cpu, target) &&
            available_idle_cpu(recent_used_cpu) &&
-           cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+           cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
                /*
                 * Replace recent_used_cpu with prev as it is a potential
                 * candidate for the next wake:
@@ -6600,7 +6600,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                int max_spare_cap_cpu = -1;
 
                for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
-                       if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+                       if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                                continue;
 
                        /* Skip CPUs that will be overutilized. */
@@ -6689,7 +6689,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                }
 
                want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
-                             cpumask_test_cpu(cpu, &p->cpus_allowed);
+                             cpumask_test_cpu(cpu, p->cpus_ptr);
        }
 
        rcu_read_lock();
@@ -7445,14 +7445,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
        /*
         * We do not migrate tasks that are:
         * 1) throttled_lb_pair, or
-        * 2) cannot be migrated to this CPU due to cpus_allowed, or
+        * 2) cannot be migrated to this CPU due to cpus_ptr, or
         * 3) running (obviously), or
         * 4) are cache-hot on their current CPU.
         */
        if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
                return 0;
 
-       if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+       if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
                int cpu;
 
                schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -7472,7 +7472,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
                /* Prevent to re-select dst_cpu via env's CPUs: */
                for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-                       if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+                       if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
                                env->flags |= LBF_DST_PINNED;
                                env->new_dst_cpu = cpu;
                                break;
@@ -8099,7 +8099,7 @@ static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -8768,7 +8768,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
        /*
         * If the busiest group is imbalanced the below checks don't
         * work because they assume all things are equal, which typically
-        * isn't true due to cpus_allowed constraints and the like.
+        * isn't true due to cpus_ptr constraints and the like.
         */
        if (busiest->group_type == group_imbalanced)
                goto force_balance;
@@ -9210,7 +9210,7 @@ more_balance:
                         * if the curr task on busiest CPU can't be
                         * moved to this_cpu:
                         */
-                       if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+                       if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
                                raw_spin_unlock_irqrestore(&busiest->lock,
                                                            flags);
                                env.flags |= LBF_ALL_PINNED;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1e6b909dca367c58d332a0de3d229932c6bee83f..63ad7c90822c1d03fe748c6fb5ea010f61b9d2fd 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1614,7 +1614,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           cpumask_test_cpu(cpu, &p->cpus_allowed))
+           cpumask_test_cpu(cpu, p->cpus_ptr))
                return 1;
 
        return 0;
@@ -1751,7 +1751,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
-                                    !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
+                                    !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
                                     task_running(rq, task) ||
                                     !rt_task(task) ||
                                     !task_on_rq_queued(task))) {
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1e6db9cbe4dc518b0f0a482cf1c7e5eeffa0a3c4..fa95139445b28609b14544342ac6f4c88f3f56bd 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -277,7 +277,7 @@ static void move_to_next_cpu(void)
         * of this thread, than stop migrating for the duration
         * of the current test.
         */
-       if (!cpumask_equal(current_mask, &current->cpus_allowed))
+       if (!cpumask_equal(current_mask, current->cpus_ptr))
                goto disable;
 
        get_online_cpus();
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c214f97e5b743a74657194d98501bb..60ba93fc42ce3810b53e0698ed8de1924175371d 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
-       if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+       if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
                goto out;
 
        /*
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 1da597aa614160b50397459f33e627b42ca2be96..1a72b7d95cdc74701ee70832853e0f85e2d25e16 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -34,7 +34,7 @@ static void simple_thread_func(int cnt)
 
        /* Silly tracepoints */
        trace_foo_bar("hello", cnt, array, random_strings[len],
-                     &current->cpus_allowed);
+                     current->cpus_ptr);
 
        trace_foo_with_template_simple("HELLO", cnt);