sysctl kernel: Remove binary sysctl logic
[sfrench/cifs-2.6.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0dd2e937a0fe6098140917828d148..dbb99d787a4190fce6535c47ae225f41b4b9d48c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
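
Both this hunk and the update_sg_lb_stats hunk further down add a missing kerneldoc @-parameter line: kerneldoc expects every parameter of the documented function to be listed as "@name: description" before the long description. For reference, the general shape of such a block (purely illustrative, not from this patch):

/**
 * my_func - one-line summary of the function
 * @arg1: meaning of the first parameter.
 * @arg2: meaning of the second parameter.
 *
 * Longer description of behaviour and return value.
 */
static int my_func(int arg1, int arg2)
{
	return arg1 + arg2;
}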
@@ -1563,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1577,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1617,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1626,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
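
For readers unfamiliar with the two per-cpu idioms swapped in this hunk: __get_cpu_var() names a statically defined per-cpu variable directly, while per_cpu_ptr() computes the address of one cpu's copy inside a dynamically allocated per-cpu region. A minimal sketch of the contrast (variable and function names are illustrative, not from this patch; like the patched code, which runs under local_irq_save(), the accesses assume preemption is disabled):

/* Static: one copy per possible cpu, reserved at build time. */
static DEFINE_PER_CPU(unsigned long, static_counter);

/* Dynamic: region allocated at runtime with __alloc_percpu(). */
static unsigned long *dyn_counter;

static void touch_counters(void)
{
	/* Direct reference to this cpu's static copy. */
	__get_cpu_var(static_counter)++;

	/* Computed pointer to this cpu's dynamic copy. */
	(*per_cpu_ptr(dyn_counter, smp_processor_id()))++;
}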
@@ -1651,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -2311,7 +2308,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2319,7 +2316,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2350,6 +2347,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	set_task_cpu(p, cpu);
 
 	rq = task_rq_lock(p, &flags);
+
+	if (rq != orig_rq)
+		update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -3656,6 +3657,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6718,9 +6720,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -7374,17 +7373,16 @@ static struct ctl_table sd_ctl_dir[] = {
 		.procname	= "sched_domain",
 		.mode		= 0555,
 	},
-	{0, },
+	{}
 };
 
 static struct ctl_table sd_ctl_root[] = {
 	{
-		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
 		.mode		= 0555,
 		.child		= sd_ctl_dir,
 	},
-	{0, },
+	{}
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
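
This hunk is the one that matches the commit title: with the binary sysctl(2) path gone, entries are looked up by .procname alone, so the numeric .ctl_name member is dropped and tables end in an empty sentinel initializer instead of an explicitly zeroed one. A minimal sketch of a post-removal table (names are illustrative; registration through register_sysctl_table() as was usual in this era):

static int my_tunable;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_tunable",
		.data		= &my_tunable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}	/* all-zero sentinel terminates the table */
};

static struct ctl_table my_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= my_table,
	},
	{}
};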
@@ -9404,6 +9402,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
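
This final hunk performs the allocation the earlier hunks depend on: each cpu gets an array of nr_cpu_ids weights, sized for the cpus actually possible on the running machine, instead of the old DEFINE_PER_CPU struct embedding an NR_CPUS-sized array fixed at build time. As a rough worked example (assuming 64-bit longs), a kernel built with CONFIG_NR_CPUS=4096 previously reserved 4096 * 8 bytes = 32 KiB of percpu space for each cpu's copy regardless of the hardware; after this change a 16-cpu machine allocates only 16 * 8 = 128 bytes per cpu. The patch itself does not check the return value; a hypothetical defensive variant could look like:

	/* Sketch only: sched_init() runs too early for graceful failure,
	 * so a failed percpu allocation here is unrecoverable anyway. */
	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
					    __alignof__(unsigned long));
	if (!update_shares_data)
		panic("sched_init: failed to allocate update_shares_data");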