Merge branch 'linus' into sched/devel
author		Ingo Molnar <mingo@elte.hu>	Sat, 6 Sep 2008 14:51:57 +0000 (16:51 +0200)
committer	Ingo Molnar <mingo@elte.hu>	Sat, 6 Sep 2008 14:51:57 +0000 (16:51 +0200)
include/linux/sched.h
kernel/sched.c

diff --combined include/linux/sched.h
index 08a87b5f29e17ad04fcfc189b0c359b71e382bf9,3d9120c5ad1589a0da722e514c370c0a3f1c4fe4..b3b7a8f32477aa096c170e1343df846d76df2846
@@@ -451,8 -451,8 +451,8 @@@ struct signal_struct 
         * - everyone except group_exit_task is stopped during signal delivery
         *   of fatal signals, group_exit_task processes the signal.
         */
 -      struct task_struct      *group_exit_task;
        int                     notify_count;
 +      struct task_struct      *group_exit_task;
  
        /* thread group stop support, overloads group_exit_code too */
        int                     group_stop_count;
@@@ -1010,8 -1010,8 +1010,8 @@@ struct sched_entity 
  
  struct sched_rt_entity {
        struct list_head run_list;
 -      unsigned int time_slice;
        unsigned long timeout;
 +      unsigned int time_slice;
        int nr_cpus_allowed;
  
        struct sched_rt_entity *back;
@@@ -1475,6 -1475,10 +1475,10 @@@ static inline void put_task_struct(stru
                __put_task_struct(t);
  }
  
+ extern cputime_t task_utime(struct task_struct *p);
+ extern cputime_t task_stime(struct task_struct *p);
+ extern cputime_t task_gtime(struct task_struct *p);
  /*
   * Per process flags
   */
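
[The three externs above publish the new cputime accessors to the rest of the kernel. A hypothetical consumer, modeled loosely on how /proc reporting code would use them (my_report_times is an illustrative name, not part of this patch):

    static void my_report_times(struct task_struct *p)
    {
            /* the accessors pick precise or sampled accounting internally */
            cputime_t ut = task_utime(p);
            cputime_t st = task_stime(p);
            cputime_t gt = task_gtime(p);

            printk(KERN_INFO "pid %d: utime %lu stime %lu gtime %lu\n",
                   p->pid,
                   (unsigned long)cputime_to_clock_t(ut),
                   (unsigned long)cputime_to_clock_t(st),
                   (unsigned long)cputime_to_clock_t(gt));
    }
]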
diff --combined kernel/sched.c
index b112caaa400a197da958e463333d94a39c0b5f07,1a5f73c1fcdcd12983c6a2eed9ef66e630dca837..8626ae50ce0826229ee272af55c97622cfb2927e
@@@ -1921,8 -1921,11 +1921,8 @@@ unsigned long wait_task_inactive(struc
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
                ncsw = 0;
 -              if (!match_state || p->state == match_state) {
 -                      ncsw = p->nivcsw + p->nvcsw;
 -                      if (unlikely(!ncsw))
 -                              ncsw = 1;
 -              }
 +              if (!match_state || p->state == match_state)
 +                      ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, &flags);
  
                /*
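
[The hunk above replaces the zero-guarded nivcsw + nvcsw sum with p->nvcsw | LONG_MIN: OR-ing in the sign bit guarantees a non-zero value, so a returned 0 can only mean "the task was not in the expected state", while comparing two samples still detects any change in the voluntary switch count. A userspace sketch of the idea (illustrative only, not from the patch):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long nvcsw = 0;           /* task has never switched */
            long ncsw = nvcsw | LONG_MIN;      /* MSB set: never zero */

            printf("ncsw = %ld (nonzero even though nvcsw == 0)\n", ncsw);

            nvcsw++;                           /* one voluntary switch later */
            long ncsw2 = nvcsw | LONG_MIN;

            /* the tag bit is common to both samples, so a change shows */
            printf("count changed: %d\n", ncsw != ncsw2);
            return 0;
    }
]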
@@@ -4175,6 -4178,65 +4175,65 @@@ void account_steal_time(struct task_str
                cpustat->steal = cputime64_add(cpustat->steal, tmp);
  }
  
+ /*
+  * Use precise platform statistics if available:
+  */
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ cputime_t task_utime(struct task_struct *p)
+ {
+       return p->utime;
+ }
+ 
+ cputime_t task_stime(struct task_struct *p)
+ {
+       return p->stime;
+ }
+ #else
+ cputime_t task_utime(struct task_struct *p)
+ {
+       clock_t utime = cputime_to_clock_t(p->utime),
+               total = utime + cputime_to_clock_t(p->stime);
+       u64 temp;
+ 
+       /*
+        * Use CFS's precise accounting:
+        */
+       temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+ 
+       if (total) {
+               temp *= utime;
+               do_div(temp, total);
+       }
+       utime = (clock_t)temp;
+ 
+       /*
+        * Compare with previous values, to keep monotonicity:
+        */
+       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+ 
+       return p->prev_utime;
+ }
+ 
+ cputime_t task_stime(struct task_struct *p)
+ {
+       clock_t stime;
+ 
+       /*
+        * Use CFS's precise accounting. (we subtract utime from
+        * the total, to make sure the total observed by userspace
+        * grows monotonically - apps rely on that):
+        */
+       stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+                       cputime_to_clock_t(task_utime(p));
+ 
+       if (stime >= 0)
+               p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+ 
+       return p->prev_stime;
+ }
+ #endif
+ 
+ inline cputime_t task_gtime(struct task_struct *p)
+ {
+       return p->gtime;
+ }
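
[In the #else branch above, task_utime() splits the precise CFS runtime (se.sum_exec_runtime) between user and system time in proportion to the tick-sampled utime/total ratio, and the prev_utime/prev_stime clamps keep both values monotonic. A userspace rendering of the arithmetic with made-up numbers (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t utime = 300;      /* tick-sampled user time (clock_t) */
            uint64_t stime = 100;      /* tick-sampled system time */
            uint64_t total = utime + stime;
            uint64_t sum_exec = 420;   /* precise CFS runtime, in clock_t */

            uint64_t scaled = sum_exec;        /* mirrors temp above */
            if (total) {
                    scaled *= utime;           /* 420 * 300 = 126000 */
                    scaled /= total;           /* 126000 / 400 = 315 */
            }

            /* 315 of the 420 precise ticks count as user time; the
             * remaining 105 become system time in task_stime() */
            printf("scaled utime = %llu\n", (unsigned long long)scaled);
            return 0;
    }
]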
  /*
   * This function gets called by the timer code, with HZ frequency.
   * We call it with interrupts disabled.
@@@ -4565,15 -4627,6 +4624,15 @@@ __wake_up_sync(wait_queue_head_t *q, un
  }
  EXPORT_SYMBOL_GPL(__wake_up_sync);    /* For internal use only */
  
 +/**
 + * complete: - signals a single thread waiting on this completion
 + * @x:  holds the state of this particular completion
 + *
 + * This will wake up a single thread waiting on this completion. Threads will be
 + * awakened in the same order in which they were queued.
 + *
 + * See also complete_all(), wait_for_completion() and related routines.
 + */
  void complete(struct completion *x)
  {
        unsigned long flags;
  
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
  }
  EXPORT_SYMBOL(complete);
  
 +/**
 + * complete_all: - signals all threads waiting on this completion
 + * @x:  holds the state of this particular completion
 + *
 + * This will wake up all threads waiting on this particular completion event.
 + */
  void complete_all(struct completion *x)
  {
        unsigned long flags;
  
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done += UINT_MAX/2;
        __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
        spin_unlock_irqrestore(&x->wait.lock, flags);
  }
  EXPORT_SYMBOL(complete_all);
@@@ -4611,7 -4658,10 +4670,7 @@@ do_wait_for_common(struct completion *x
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
 -                      if ((state == TASK_INTERRUPTIBLE &&
 -                           signal_pending(current)) ||
 -                          (state == TASK_KILLABLE &&
 -                           fatal_signal_pending(current))) {
 +                      if (signal_pending_state(state, current)) {
                                timeout = -ERESTARTSYS;
                                break;
                        }
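
[The four-line condition collapses into signal_pending_state(), which encodes the same policy: interruptible sleeps abort on any pending signal, killable sleeps only on fatal ones. The helper in include/linux/sched.h of this era looks roughly like:

    static inline int signal_pending_state(long state, struct task_struct *p)
    {
            if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                    return 0;
            if (!signal_pending(p))
                    return 0;

            /* interruptible: any signal; killable: only fatal ones */
            return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
    }
]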
@@@ -4639,31 -4689,12 +4698,31 @@@ wait_for_common(struct completion *x, l
        return timeout;
  }
  
 +/**
 + * wait_for_completion: - waits for completion of a task
 + * @x:  holds the state of this particular completion
 + *
 + * This waits to be signaled for completion of a specific task. It is NOT
 + * interruptible and there is no timeout.
 + *
 + * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 + * and interrupt capability. Also see complete().
 + */
  void __sched wait_for_completion(struct completion *x)
  {
        wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL(wait_for_completion);
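
[As the new kerneldoc describes, complete() and wait_for_completion() pair up across threads. A hypothetical usage sketch (my_work_done and my_worker are illustrative names, not from the patch):

    #include <linux/completion.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(my_work_done);

    static int my_worker(void *unused)
    {
            /* ... perform the actual work ... */
            complete(&my_work_done);             /* wake exactly one waiter */
            return 0;
    }

    static void my_caller(void)
    {
            kthread_run(my_worker, NULL, "my_worker");
            wait_for_completion(&my_work_done);  /* uninterruptible wait */
    }
]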
  
 +/**
 + * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 + * @x:  holds the state of this particular completion
 + * @timeout:  timeout value in jiffies
 + *
 + * This waits for either a completion of a specific task to be signaled or for a
 + * specified timeout to expire. The timeout is in jiffies. It is not
 + * interruptible.
 + */
  unsigned long __sched
  wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  {
        return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL(wait_for_completion_timeout);
  
 +/**
 + * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 + * @x:  holds the state of this particular completion
 + *
 + * This waits for completion of a specific task to be signaled. It is
 + * interruptible.
 + */
  int __sched wait_for_completion_interruptible(struct completion *x)
  {
        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
        if (t == -ERESTARTSYS)
                return t;
        return 0;
  }
  EXPORT_SYMBOL(wait_for_completion_interruptible);
  
 +/**
 + * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 + * @x:  holds the state of this particular completion
 + * @timeout:  timeout value in jiffies
 + *
 + * This waits for either a completion of a specific task to be signaled or for a
 + * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 + */
  unsigned long __sched
  wait_for_completion_interruptible_timeout(struct completion *x,
                                          unsigned long timeout)
  {
        return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
  }
  EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  
 +/**
 + * wait_for_completion_killable: - waits for completion of a task (killable)
 + * @x:  holds the state of this particular completion
 + *
 + * This waits to be signaled for completion of a specific task. It can be
 + * interrupted by a kill signal.
 + */
  int __sched wait_for_completion_killable(struct completion *x)
  {
        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
        if (t == -ERESTARTSYS)
                return t;
        return 0;
  }
  EXPORT_SYMBOL(wait_for_completion_killable);
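
[Unlike plain wait_for_completion(), the interruptible and killable variants return -ERESTARTSYS when a (fatal) signal cuts the wait short, and 0 once the completion is signaled. A hypothetical caller (my_wait_for_result is an illustrative name):

    static int my_wait_for_result(struct completion *done)
    {
            int ret = wait_for_completion_killable(done);

            if (ret)        /* a fatal signal arrived first */
                    return ret;

            /* the completion was signaled */
            return 0;
    }
]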
@@@ -8226,25 -8235,20 +8285,25 @@@ void __might_sleep(char *file, int line
  #ifdef in_atomic
        static unsigned long prev_jiffy;        /* ratelimiting */
  
 -      if ((in_atomic() || irqs_disabled()) &&
 -          system_state == SYSTEM_RUNNING && !oops_in_progress) {
 -              if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 -                      return;
 -              prev_jiffy = jiffies;
 -              printk(KERN_ERR "BUG: sleeping function called from invalid"
 -                              " context at %s:%d\n", file, line);
 -              printk("in_atomic():%d, irqs_disabled():%d\n",
 -                      in_atomic(), irqs_disabled());
 -              debug_show_held_locks(current);
 -              if (irqs_disabled())
 -                      print_irqtrace_events(current);
 -              dump_stack();
 -      }
 +      if ((!in_atomic() && !irqs_disabled()) ||
 +                  system_state != SYSTEM_RUNNING || oops_in_progress)
 +              return;
 +      if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 +              return;
 +      prev_jiffy = jiffies;
 +
 +      printk(KERN_ERR
 +              "BUG: sleeping function called from invalid context at %s:%d\n",
 +                      file, line);
 +      printk(KERN_ERR
 +              "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
 +                      in_atomic(), irqs_disabled(),
 +                      current->pid, current->comm);
 +
 +      debug_show_held_locks(current);
 +      if (irqs_disabled())
 +              print_irqtrace_events(current);
 +      dump_stack();
  #endif
  }
  EXPORT_SYMBOL(__might_sleep);
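
[The restructured check fires through might_sleep() whenever a sleeping primitive is reached in atomic context, and the new printk also reports the offender's pid and comm. A hypothetical trigger (my_buggy_function and my_lock are illustrative names, not from the patch):

    #include <linux/spinlock.h>
    #include <linux/delay.h>

    static DEFINE_SPINLOCK(my_lock);

    static void my_buggy_function(void)
    {
            spin_lock(&my_lock);    /* enters atomic context */

            /*
             * msleep() calls might_sleep(); with the code above this
             * prints "BUG: sleeping function called from invalid
             * context", the in_atomic()/irqs_disabled() state, and
             * now the pid and comm of the current task.
             */
            msleep(10);

            spin_unlock(&my_lock);
    }
]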