#include <asm/page.h>
#include <asm/ptrace.h>
-#include <linux/cputime.h>
#include <linux/smp.h>
#include <linux/sem.h>
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
-/* Convenience macros for the sake of set_task_state */
+/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-#define __set_task_state(tsk, state_value) \
- do { \
- (tsk)->task_state_change = _THIS_IP_; \
- (tsk)->state = (state_value); \
- } while (0)
-#define set_task_state(tsk, state_value) \
- do { \
- (tsk)->task_state_change = _THIS_IP_; \
- smp_store_mb((tsk)->state, (state_value)); \
- } while (0)
-
#define __set_current_state(state_value) \
do { \
 current->task_state_change = _THIS_IP_; \
 current->state = (state_value); \
 } while (0)
#else
-
-/*
- * @tsk had better be current, or you get to keep the pieces.
- *
- * The only reason is that computing current can be more expensive than
- * using a pointer that's already available.
- *
- * Therefore, see set_current_state().
- */
-#define __set_task_state(tsk, state_value) \
- do { (tsk)->state = (state_value); } while (0)
-#define set_task_state(tsk, state_value) \
- smp_store_mb((tsk)->state, (state_value))
-
/*
* set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
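 *
 * A typical wait loop, sketched here for illustration (need_sleep stands
 * for whatever condition the caller is actually waiting on):
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);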
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
- io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
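
/*
 * Illustrative sketch of the intended prepare/finish pairing ("lock" is a
 * hypothetical mutex the caller blocks on while waiting for I/O; the token
 * carries the previous iowait state across the blocking section):
 *
 *	int token = io_schedule_prepare();
 *	mutex_lock(lock);
 *	io_schedule_finish(token);
 *
 * io_schedule() itself is equivalent to wrapping schedule() in such a
 * prepare/finish pair.
 */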
void __noreturn do_task_dead(void);
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
- cputime_t ac_utime, ac_stime;
+ u64 ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};
struct cpu_itimer {
- cputime_t expires;
- cputime_t incr;
- u32 error;
- u32 incr_error;
+ u64 expires;
+ u64 incr;
};
/**
*/
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
raw_spinlock_t lock;
#endif
};
/**
* struct task_cputime - collected CPU time counts
- * @utime: time spent in user mode, in &cputime_t units
- * @stime: time spent in kernel mode, in &cputime_t units
+ * @utime: time spent in user mode, in nanoseconds
+ * @stime: time spent in kernel mode, in nanoseconds
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
*/
struct task_cputime {
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
unsigned long long sum_exec_runtime;
};
#define prof_exp stime
#define sched_exp sum_exec_runtime
-#define INIT_CPUTIME \
- (struct task_cputime) { \
- .utime = 0, \
- .stime = 0, \
- .sum_exec_runtime = 0, \
- }
-
/*
* This is the atomic variant of task_cputime, which can be used for
* storing and updating task_cputime statistics without locking.
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
+#ifdef CONFIG_POSIX_TIMERS
+
/* POSIX.1b Interval Timers */
int posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
- struct pid *leader_pid;
ktime_t it_real_incr;
/*
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+#endif
+
+ struct pid *leader_pid;
+
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
#endif
- struct list_head cpu_timers[3];
-
struct pid *tty_old_pgrp;
/* boolean value for session group leader */
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
- cputime_t utime, stime, cutime, cstime;
- cputime_t gtime;
- cputime_t cgtime;
+ u64 utime, stime, cutime, cstime;
+ u64 gtime;
+ u64 cgtime;
struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+ SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+ unsigned int flags)
+{
+ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
*
* The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
- * called near the end of a function, where the fact that the queue is
- * not used again will be easy to see by inspection.
+ * called near the end of a function. Otherwise, the list can be
+ * re-initialized for later re-use by wake_q_init().
*
* Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has indeed occurred.
#define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+static inline void wake_q_init(struct wake_q_head *head)
+{
+ head->first = WAKE_Q_TAIL;
+ head->lastp = &head->first;
+}
+
extern void wake_q_add(struct wake_q_head *head,
struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
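
/*
 * Illustrative usage sketch ("lock" and "task" are hypothetical stand-ins):
 * queue wakeups while holding the lock, then issue them after dropping it.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);
 *
 * A head that needs to be reused afterwards can be reset to the empty state
 * with wake_q_init(&wake_q).
 */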
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime;
+ u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
- cputime_t utimescaled, stimescaled;
+ u64 utimescaled, stimescaled;
#endif
- cputime_t gtime;
+ u64 gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqcount_t vtime_seqcount;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
+#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
+#endif
/* process credentials */
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
- cputime_t acct_timexpd; /* stime + utime since last update */
+ u64 acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+ u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime)
+ u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
}
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
{
return t->gtime;
}
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
*utimescaled = t->utimescaled;
*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
task_cputime(t, utimescaled, stimescaled);
}
#endif
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
/*
* Per process flags
extern void sched_clock_init(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
static inline void sched_clock_tick(void)
{
}
+static inline void clear_sched_clock_stable(void)
+{
+}
+
static inline void sched_clock_idle_sleep_event(void)
{
}
return sched_clock();
}
#else
+extern void sched_clock_init_late(void);
/*
* Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
*/
extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);