1 // SPDX-License-Identifier: GPL-2.0
3 * Implement CPU time clocks for the POSIX clock interface.
6 #include <linux/sched/signal.h>
7 #include <linux/sched/cputime.h>
8 #include <linux/posix-timers.h>
9 #include <linux/errno.h>
10 #include <linux/math64.h>
11 #include <linux/uaccess.h>
12 #include <linux/kernel_stat.h>
13 #include <trace/events/timer.h>
14 #include <linux/tick.h>
15 #include <linux/workqueue.h>
16 #include <linux/compat.h>
17 #include <linux/sched/deadline.h>
19 #include "posix-timers.h"
21 static void posix_cpu_timer_rearm(struct k_itimer *timer);
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
25 posix_cputimers_init(pct);
26 if (cpu_limit != RLIM_INFINITY) {
27 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
28 pct->timers_active = true;
33 * Called after updating RLIMIT_CPU to run cpu timer and update
34 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
35 * necessary. Needs siglock protection since other code may update the
36 * expiration cache as well.
38 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
40 u64 nsecs = rlim_new * NSEC_PER_SEC;
42 spin_lock_irq(&task->sighand->siglock);
43 set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
44 spin_unlock_irq(&task->sighand->siglock);
48 * Functions for validating access to tasks.
50 static struct task_struct *lookup_task(const pid_t pid, bool thread,
53 struct task_struct *p;
56 * If the encoded PID is 0, then the timer is targeted at current
57 * or the process to which current belongs.
60 return thread ? current : current->group_leader;
62 p = find_task_by_vpid(pid);
67 return same_thread_group(p, current) ? p : NULL;
71 * For clock_gettime(PROCESS) the task does not need to be
72 * the actual group leader. tsk->sighand gives
73 * access to the group's clock.
75 * Timers need the group leader because they take a
76 * reference on it and store the task pointer until the
79 return (p == current || thread_group_leader(p)) ? p : NULL;
83 * For processes require that p is group leader.
85 return has_group_leader_pid(p) ? p : NULL;
88 static struct task_struct *__get_task_for_clock(const clockid_t clock,
89 bool getref, bool gettime)
91 const bool thread = !!CPUCLOCK_PERTHREAD(clock);
92 const pid_t pid = CPUCLOCK_PID(clock);
93 struct task_struct *p;
95 if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
99 p = lookup_task(pid, thread, gettime);
106 static inline struct task_struct *get_task_for_clock(const clockid_t clock)
108 return __get_task_for_clock(clock, true, false);
111 static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
113 return __get_task_for_clock(clock, true, true);
116 static inline int validate_clock_permissions(const clockid_t clock)
118 return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
121 static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
123 return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
126 static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
128 return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
132 * Update expiry time from increment, and increase overrun count,
133 * given the current clock sample.
135 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
137 u64 delta, incr, expires = timer->it.cpu.node.expires;
140 if (!timer->it_interval)
146 incr = timer->it_interval;
147 delta = now + incr - expires;
149 /* Don't use (incr*2 < delta), incr*2 might overflow. */
150 for (i = 0; incr < delta - incr; i++)
153 for (; i >= 0; incr >>= 1, i--) {
157 timer->it.cpu.node.expires += incr;
158 timer->it_overrun += 1LL << i;
161 return timer->it.cpu.node.expires;
164 /* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
165 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
167 return !(~pct->bases[CPUCLOCK_PROF].nextevt |
168 ~pct->bases[CPUCLOCK_VIRT].nextevt |
169 ~pct->bases[CPUCLOCK_SCHED].nextevt);
173 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
175 int error = validate_clock_permissions(which_clock);
179 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
180 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
182 * If sched_clock is using a cycle counter, we
183 * don't have any idea of its true resolution
184 * exported, but it is much more than 1s/HZ.
193 posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
195 int error = validate_clock_permissions(clock);
198 * You can never reset a CPU clock, but we check for other errors
199 * in the call before failing with EPERM.
201 return error ? : -EPERM;
205 * Sample a per-thread clock for the given task. clkid is validated.
207 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
211 if (clkid == CPUCLOCK_SCHED)
212 return task_sched_runtime(p);
214 task_cputime(p, &utime, &stime);
218 return utime + stime;
227 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
229 samples[CPUCLOCK_PROF] = stime + utime;
230 samples[CPUCLOCK_VIRT] = utime;
231 samples[CPUCLOCK_SCHED] = rtime;
234 static void task_sample_cputime(struct task_struct *p, u64 *samples)
238 task_cputime(p, &utime, &stime);
239 store_samples(samples, stime, utime, p->se.sum_exec_runtime);
242 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
245 u64 stime, utime, rtime;
247 utime = atomic64_read(&at->utime);
248 stime = atomic64_read(&at->stime);
249 rtime = atomic64_read(&at->sum_exec_runtime);
250 store_samples(samples, stime, utime, rtime);
254 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
255 * to avoid race conditions with concurrent updates to cputime.
257 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
261 curr_cputime = atomic64_read(cputime);
262 if (sum_cputime > curr_cputime) {
263 if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
268 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
269 struct task_cputime *sum)
271 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
272 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
273 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
277 * thread_group_sample_cputime - Sample cputime for a given task
278 * @tsk: Task for which cputime needs to be started
279 * @samples: Storage for time samples
281 * Called from sys_getitimer() to calculate the expiry time of an active
282 * timer. That means group cputime accounting is already active. Called
283 * with task sighand lock held.
285 * Updates @times with an uptodate sample of the thread group cputimes.
287 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
289 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
290 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
292 WARN_ON_ONCE(!pct->timers_active);
294 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
298 * thread_group_start_cputime - Start cputime and return a sample
299 * @tsk: Task for which cputime needs to be started
300 * @samples: Storage for time samples
302 * The thread group cputime accouting is avoided when there are no posix
303 * CPU timers armed. Before starting a timer it's required to check whether
304 * the time accounting is active. If not, a full update of the atomic
305 * accounting store needs to be done and the accounting enabled.
307 * Updates @times with an uptodate sample of the thread group cputimes.
309 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
311 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
312 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
314 /* Check if cputimer isn't running. This is accessed without locking. */
315 if (!READ_ONCE(pct->timers_active)) {
316 struct task_cputime sum;
319 * The POSIX timer interface allows for absolute time expiry
320 * values through the TIMER_ABSTIME flag, therefore we have
321 * to synchronize the timer to the clock every time we start it.
323 thread_group_cputime(tsk, &sum);
324 update_gt_cputime(&cputimer->cputime_atomic, &sum);
327 * We're setting timers_active without a lock. Ensure this
328 * only gets written to in one operation. We set it after
329 * update_gt_cputime() as a small optimization, but
330 * barriers are not required because update_gt_cputime()
331 * can handle concurrent updates.
333 WRITE_ONCE(pct->timers_active, true);
335 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
338 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
340 struct task_cputime ct;
342 thread_group_cputime(tsk, &ct);
343 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
347 * Sample a process (thread group) clock for the given task clkid. If the
348 * group's cputime accounting is already enabled, read the atomic
349 * store. Otherwise a full update is required. clkid is already validated.
351 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
354 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
355 struct posix_cputimers *pct = &p->signal->posix_cputimers;
356 u64 samples[CPUCLOCK_MAX];
358 if (!READ_ONCE(pct->timers_active)) {
360 thread_group_start_cputime(p, samples);
362 __thread_group_cputime(p, samples);
364 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
367 return samples[clkid];
370 static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
372 const clockid_t clkid = CPUCLOCK_WHICH(clock);
373 struct task_struct *tsk;
376 tsk = get_task_for_clock_get(clock);
380 if (CPUCLOCK_PERTHREAD(clock))
381 t = cpu_clock_sample(clkid, tsk);
383 t = cpu_clock_sample_group(clkid, tsk, false);
384 put_task_struct(tsk);
386 *tp = ns_to_timespec64(t);
391 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
392 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
393 * new timer already all-zeros initialized.
395 static int posix_cpu_timer_create(struct k_itimer *new_timer)
397 struct task_struct *p = get_task_for_clock(new_timer->it_clock);
402 new_timer->kclock = &clock_posix_cpu;
403 timerqueue_init(&new_timer->it.cpu.node);
404 new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
406 * get_task_for_clock() took a reference on @p. Drop it as the timer
407 * holds a reference on the pid of @p.
414 * Clean up a CPU-clock timer that is about to be destroyed.
415 * This is called from timer deletion with the timer already locked.
416 * If we return TIMER_RETRY, it's necessary to release the timer's lock
417 * and try again. (This happens when the timer is in the middle of firing.)
419 static int posix_cpu_timer_del(struct k_itimer *timer)
421 struct cpu_timer *ctmr = &timer->it.cpu;
422 struct sighand_struct *sighand;
423 struct task_struct *p;
428 p = cpu_timer_task_rcu(timer);
433 * Protect against sighand release/switch in exit/exec and process/
434 * thread timer list entry concurrent read/writes.
436 sighand = lock_task_sighand(p, &flags);
437 if (unlikely(sighand == NULL)) {
439 * This raced with the reaping of the task. The exit cleanup
440 * should have removed this timer from the timer queue.
442 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
444 if (timer->it.cpu.firing)
447 cpu_timer_dequeue(ctmr);
449 unlock_task_sighand(p, &flags);
460 static void cleanup_timerqueue(struct timerqueue_head *head)
462 struct timerqueue_node *node;
463 struct cpu_timer *ctmr;
465 while ((node = timerqueue_getnext(head))) {
466 timerqueue_del(head, node);
467 ctmr = container_of(node, struct cpu_timer, node);
473 * Clean out CPU timers which are still armed when a thread exits. The
474 * timers are only removed from the list. No other updates are done. The
475 * corresponding posix timers are still accessible, but cannot be rearmed.
477 * This must be called with the siglock held.
479 static void cleanup_timers(struct posix_cputimers *pct)
481 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
482 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
483 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
487 * These are both called with the siglock held, when the current thread
488 * is being reaped. When the final (leader) thread in the group is reaped,
489 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
491 void posix_cpu_timers_exit(struct task_struct *tsk)
493 cleanup_timers(&tsk->posix_cputimers);
495 void posix_cpu_timers_exit_group(struct task_struct *tsk)
497 cleanup_timers(&tsk->signal->posix_cputimers);
501 * Insert the timer on the appropriate list before any timers that
502 * expire later. This must be called with the sighand lock held.
504 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
506 int clkidx = CPUCLOCK_WHICH(timer->it_clock);
507 struct cpu_timer *ctmr = &timer->it.cpu;
508 u64 newexp = cpu_timer_getexpires(ctmr);
509 struct posix_cputimer_base *base;
511 if (CPUCLOCK_PERTHREAD(timer->it_clock))
512 base = p->posix_cputimers.bases + clkidx;
514 base = p->signal->posix_cputimers.bases + clkidx;
516 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
520 * We are the new earliest-expiring POSIX 1.b timer, hence
521 * need to update expiration cache. Take into account that
522 * for process timers we share expiration cache with itimers
523 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
525 if (newexp < base->nextevt)
526 base->nextevt = newexp;
528 if (CPUCLOCK_PERTHREAD(timer->it_clock))
529 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
531 tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
535 * The timer is locked, fire it and arrange for its reload.
537 static void cpu_timer_fire(struct k_itimer *timer)
539 struct cpu_timer *ctmr = &timer->it.cpu;
541 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
543 * User don't want any signal.
545 cpu_timer_setexpires(ctmr, 0);
546 } else if (unlikely(timer->sigq == NULL)) {
548 * This a special case for clock_nanosleep,
549 * not a normal timer from sys_timer_create.
551 wake_up_process(timer->it_process);
552 cpu_timer_setexpires(ctmr, 0);
553 } else if (!timer->it_interval) {
555 * One-shot timer. Clear it as soon as it's fired.
557 posix_timer_event(timer, 0);
558 cpu_timer_setexpires(ctmr, 0);
559 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
561 * The signal did not get queued because the signal
562 * was ignored, so we won't get any callback to
563 * reload the timer. But we need to keep it
564 * ticking in case the signal is deliverable next time.
566 posix_cpu_timer_rearm(timer);
567 ++timer->it_requeue_pending;
572 * Guts of sys_timer_settime for CPU timers.
573 * This is called with the timer locked and interrupts disabled.
574 * If we return TIMER_RETRY, it's necessary to release the timer's lock
575 * and try again. (This happens when the timer is in the middle of firing.)
577 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
578 struct itimerspec64 *new, struct itimerspec64 *old)
580 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
581 u64 old_expires, new_expires, old_incr, val;
582 struct cpu_timer *ctmr = &timer->it.cpu;
583 struct sighand_struct *sighand;
584 struct task_struct *p;
589 p = cpu_timer_task_rcu(timer);
592 * If p has just been reaped, we can no
593 * longer get any information about it at all.
600 * Use the to_ktime conversion because that clamps the maximum
601 * value to KTIME_MAX and avoid multiplication overflows.
603 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
606 * Protect against sighand release/switch in exit/exec and p->cpu_timers
607 * and p->signal->cpu_timers read/write in arm_timer()
609 sighand = lock_task_sighand(p, &flags);
611 * If p has just been reaped, we can no
612 * longer get any information about it at all.
614 if (unlikely(sighand == NULL)) {
620 * Disarm any old timer after extracting its expiry time.
622 old_incr = timer->it_interval;
623 old_expires = cpu_timer_getexpires(ctmr);
625 if (unlikely(timer->it.cpu.firing)) {
626 timer->it.cpu.firing = -1;
629 cpu_timer_dequeue(ctmr);
633 * We need to sample the current value to convert the new
634 * value from to relative and absolute, and to convert the
635 * old value from absolute to relative. To set a process
636 * timer, we need a sample to balance the thread expiry
637 * times (in arm_timer). With an absolute time, we must
638 * check if it's already passed. In short, we need a sample.
640 if (CPUCLOCK_PERTHREAD(timer->it_clock))
641 val = cpu_clock_sample(clkid, p);
643 val = cpu_clock_sample_group(clkid, p, true);
646 if (old_expires == 0) {
647 old->it_value.tv_sec = 0;
648 old->it_value.tv_nsec = 0;
651 * Update the timer in case it has overrun already.
652 * If it has, we'll report it as having overrun and
653 * with the next reloaded timer already ticking,
654 * though we are swallowing that pending
655 * notification here to install the new setting.
657 u64 exp = bump_cpu_timer(timer, val);
660 old_expires = exp - val;
661 old->it_value = ns_to_timespec64(old_expires);
663 old->it_value.tv_nsec = 1;
664 old->it_value.tv_sec = 0;
671 * We are colliding with the timer actually firing.
672 * Punt after filling in the timer's old value, and
673 * disable this firing since we are already reporting
674 * it as an overrun (thanks to bump_cpu_timer above).
676 unlock_task_sighand(p, &flags);
680 if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
685 * Install the new expiry time (or zero).
686 * For a timer with no notification action, we don't actually
687 * arm the timer (we'll just fake it for timer_gettime).
689 cpu_timer_setexpires(ctmr, new_expires);
690 if (new_expires != 0 && val < new_expires) {
694 unlock_task_sighand(p, &flags);
696 * Install the new reload setting, and
697 * set up the signal and overrun bookkeeping.
699 timer->it_interval = timespec64_to_ktime(new->it_interval);
702 * This acts as a modification timestamp for the timer,
703 * so any automatic reload attempt will punt on seeing
704 * that we have reset the timer manually.
706 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
708 timer->it_overrun_last = 0;
709 timer->it_overrun = -1;
711 if (new_expires != 0 && !(val < new_expires)) {
713 * The designated time already passed, so we notify
714 * immediately, even if the thread never runs to
715 * accumulate more time on this clock.
717 cpu_timer_fire(timer);
724 old->it_interval = ns_to_timespec64(old_incr);
729 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
731 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
732 struct cpu_timer *ctmr = &timer->it.cpu;
733 u64 now, expires = cpu_timer_getexpires(ctmr);
734 struct task_struct *p;
737 p = cpu_timer_task_rcu(timer);
742 * Easy part: convert the reload time.
744 itp->it_interval = ktime_to_timespec64(timer->it_interval);
750 * Sample the clock to take the difference with the expiry time.
752 if (CPUCLOCK_PERTHREAD(timer->it_clock))
753 now = cpu_clock_sample(clkid, p);
755 now = cpu_clock_sample_group(clkid, p, false);
758 itp->it_value = ns_to_timespec64(expires - now);
761 * The timer should have expired already, but the firing
762 * hasn't taken place yet. Say it's just about to expire.
764 itp->it_value.tv_nsec = 1;
765 itp->it_value.tv_sec = 0;
771 #define MAX_COLLECTED 20
773 static u64 collect_timerqueue(struct timerqueue_head *head,
774 struct list_head *firing, u64 now)
776 struct timerqueue_node *next;
779 while ((next = timerqueue_getnext(head))) {
780 struct cpu_timer *ctmr;
783 ctmr = container_of(next, struct cpu_timer, node);
784 expires = cpu_timer_getexpires(ctmr);
785 /* Limit the number of timers to expire at once */
786 if (++i == MAX_COLLECTED || now < expires)
790 cpu_timer_dequeue(ctmr);
791 list_add_tail(&ctmr->elist, firing);
797 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
798 struct list_head *firing)
800 struct posix_cputimer_base *base = pct->bases;
803 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
804 base->nextevt = collect_timerqueue(&base->tqhead, firing,
809 static inline void check_dl_overrun(struct task_struct *tsk)
811 if (tsk->dl.dl_overrun) {
812 tsk->dl.dl_overrun = 0;
813 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
817 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
822 if (print_fatal_signals) {
823 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
824 rt ? "RT" : "CPU", hard ? "hard" : "soft",
825 current->comm, task_pid_nr(current));
827 __group_send_sig_info(signo, SEND_SIG_PRIV, current);
832 * Check for any per-thread CPU timers that have fired and move them off
833 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
834 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
836 static void check_thread_timers(struct task_struct *tsk,
837 struct list_head *firing)
839 struct posix_cputimers *pct = &tsk->posix_cputimers;
840 u64 samples[CPUCLOCK_MAX];
844 check_dl_overrun(tsk);
846 if (expiry_cache_is_inactive(pct))
849 task_sample_cputime(tsk, samples);
850 collect_posix_cputimers(pct, samples, firing);
853 * Check for the special case thread timers.
855 soft = task_rlimit(tsk, RLIMIT_RTTIME);
856 if (soft != RLIM_INFINITY) {
857 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
858 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
859 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
861 /* At the hard limit, send SIGKILL. No further action. */
862 if (hard != RLIM_INFINITY &&
863 check_rlimit(rttime, hard, SIGKILL, true, true))
866 /* At the soft limit, send a SIGXCPU every second */
867 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
868 soft += USEC_PER_SEC;
869 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
873 if (expiry_cache_is_inactive(pct))
874 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
877 static inline void stop_process_timers(struct signal_struct *sig)
879 struct posix_cputimers *pct = &sig->posix_cputimers;
881 /* Turn off the active flag. This is done without locking. */
882 WRITE_ONCE(pct->timers_active, false);
883 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
886 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
887 u64 *expires, u64 cur_time, int signo)
892 if (cur_time >= it->expires) {
894 it->expires += it->incr;
898 trace_itimer_expire(signo == SIGPROF ?
899 ITIMER_PROF : ITIMER_VIRTUAL,
900 task_tgid(tsk), cur_time);
901 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
904 if (it->expires && it->expires < *expires)
905 *expires = it->expires;
909 * Check for any per-thread CPU timers that have fired and move them
910 * off the tsk->*_timers list onto the firing list. Per-thread timers
911 * have already been taken off.
913 static void check_process_timers(struct task_struct *tsk,
914 struct list_head *firing)
916 struct signal_struct *const sig = tsk->signal;
917 struct posix_cputimers *pct = &sig->posix_cputimers;
918 u64 samples[CPUCLOCK_MAX];
922 * If there are no active process wide timers (POSIX 1.b, itimers,
923 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
924 * processing when there is already another task handling them.
926 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
930 * Signify that a thread is checking for process timers.
931 * Write access to this field is protected by the sighand lock.
933 pct->expiry_active = true;
936 * Collect the current process totals. Group accounting is active
937 * so the sample can be taken directly.
939 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
940 collect_posix_cputimers(pct, samples, firing);
943 * Check for the special case process timers.
945 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
946 &pct->bases[CPUCLOCK_PROF].nextevt,
947 samples[CPUCLOCK_PROF], SIGPROF);
948 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
949 &pct->bases[CPUCLOCK_VIRT].nextevt,
950 samples[CPUCLOCK_VIRT], SIGVTALRM);
952 soft = task_rlimit(tsk, RLIMIT_CPU);
953 if (soft != RLIM_INFINITY) {
954 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
955 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
956 u64 ptime = samples[CPUCLOCK_PROF];
957 u64 softns = (u64)soft * NSEC_PER_SEC;
958 u64 hardns = (u64)hard * NSEC_PER_SEC;
960 /* At the hard limit, send SIGKILL. No further action. */
961 if (hard != RLIM_INFINITY &&
962 check_rlimit(ptime, hardns, SIGKILL, false, true))
965 /* At the soft limit, send a SIGXCPU every second */
966 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
967 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
968 softns += NSEC_PER_SEC;
971 /* Update the expiry cache */
972 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
973 pct->bases[CPUCLOCK_PROF].nextevt = softns;
976 if (expiry_cache_is_inactive(pct))
977 stop_process_timers(sig);
979 pct->expiry_active = false;
983 * This is called from the signal code (via posixtimer_rearm)
984 * when the last timer signal was delivered and we have to reload the timer.
986 static void posix_cpu_timer_rearm(struct k_itimer *timer)
988 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
989 struct task_struct *p;
990 struct sighand_struct *sighand;
995 p = cpu_timer_task_rcu(timer);
1000 * Fetch the current sample and update the timer's expiry time.
1002 if (CPUCLOCK_PERTHREAD(timer->it_clock))
1003 now = cpu_clock_sample(clkid, p);
1005 now = cpu_clock_sample_group(clkid, p, true);
1007 bump_cpu_timer(timer, now);
1009 /* Protect timer list r/w in arm_timer() */
1010 sighand = lock_task_sighand(p, &flags);
1011 if (unlikely(sighand == NULL))
1015 * Now re-arm for the new expiry time.
1017 arm_timer(timer, p);
1018 unlock_task_sighand(p, &flags);
1024 * task_cputimers_expired - Check whether posix CPU timers are expired
1026 * @samples: Array of current samples for the CPUCLOCK clocks
1027 * @pct: Pointer to a posix_cputimers container
1029 * Returns true if any member of @samples is greater than the corresponding
1030 * member of @pct->bases[CLK].nextevt. False otherwise
1033 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1037 for (i = 0; i < CPUCLOCK_MAX; i++) {
1038 if (samples[i] >= pct->bases[i].nextevt)
1045 * fastpath_timer_check - POSIX CPU timers fast path.
1047 * @tsk: The task (thread) being checked.
1049 * Check the task and thread group timers. If both are zero (there are no
1050 * timers set) return false. Otherwise snapshot the task and thread group
1051 * timers and compare them with the corresponding expiration times. Return
1052 * true if a timer has expired, else return false.
1054 static inline bool fastpath_timer_check(struct task_struct *tsk)
1056 struct posix_cputimers *pct = &tsk->posix_cputimers;
1057 struct signal_struct *sig;
1059 if (!expiry_cache_is_inactive(pct)) {
1060 u64 samples[CPUCLOCK_MAX];
1062 task_sample_cputime(tsk, samples);
1063 if (task_cputimers_expired(samples, pct))
1068 pct = &sig->posix_cputimers;
1070 * Check if thread group timers expired when timers are active and
1071 * no other thread in the group is already handling expiry for
1072 * thread group cputimers. These fields are read without the
1073 * sighand lock. However, this is fine because this is meant to be
1074 * a fastpath heuristic to determine whether we should try to
1075 * acquire the sighand lock to handle timer expiry.
1077 * In the worst case scenario, if concurrently timers_active is set
1078 * or expiry_active is cleared, but the current thread doesn't see
1079 * the change yet, the timer checks are delayed until the next
1080 * thread in the group gets a scheduler interrupt to handle the
1081 * timer. This isn't an issue in practice because these types of
1082 * delays with signals actually getting sent are expected.
1084 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1085 u64 samples[CPUCLOCK_MAX];
1087 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1090 if (task_cputimers_expired(samples, pct))
1094 if (dl_task(tsk) && tsk->dl.dl_overrun)
1101 * This is called from the timer interrupt handler. The irq handler has
1102 * already updated our counts. We need to check if any timers fire now.
1103 * Interrupts are disabled.
1105 void run_posix_cpu_timers(void)
1107 struct task_struct *tsk = current;
1108 struct k_itimer *timer, *next;
1109 unsigned long flags;
1112 lockdep_assert_irqs_disabled();
1115 * The fast path checks that there are no expired thread or thread
1116 * group timers. If that's so, just return.
1118 if (!fastpath_timer_check(tsk))
1121 lockdep_posixtimer_enter();
1122 if (!lock_task_sighand(tsk, &flags)) {
1123 lockdep_posixtimer_exit();
1127 * Here we take off tsk->signal->cpu_timers[N] and
1128 * tsk->cpu_timers[N] all the timers that are firing, and
1129 * put them on the firing list.
1131 check_thread_timers(tsk, &firing);
1133 check_process_timers(tsk, &firing);
1136 * We must release these locks before taking any timer's lock.
1137 * There is a potential race with timer deletion here, as the
1138 * siglock now protects our private firing list. We have set
1139 * the firing flag in each timer, so that a deletion attempt
1140 * that gets the timer lock before we do will give it up and
1141 * spin until we've taken care of that timer below.
1143 unlock_task_sighand(tsk, &flags);
1146 * Now that all the timers on our list have the firing flag,
1147 * no one will touch their list entries but us. We'll take
1148 * each timer's lock before clearing its firing flag, so no
1149 * timer call will interfere.
1151 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1154 spin_lock(&timer->it_lock);
1155 list_del_init(&timer->it.cpu.elist);
1156 cpu_firing = timer->it.cpu.firing;
1157 timer->it.cpu.firing = 0;
1159 * The firing flag is -1 if we collided with a reset
1160 * of the timer, which already reported this
1161 * almost-firing as an overrun. So don't generate an event.
1163 if (likely(cpu_firing >= 0))
1164 cpu_timer_fire(timer);
1165 spin_unlock(&timer->it_lock);
1167 lockdep_posixtimer_exit();
1171 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1172 * The tsk->sighand->siglock must be held by the caller.
1174 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1175 u64 *newval, u64 *oldval)
1179 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1182 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1183 now = cpu_clock_sample_group(clkid, tsk, true);
1187 * We are setting itimer. The *oldval is absolute and we update
1188 * it to be relative, *newval argument is relative and we update
1189 * it to be absolute.
1192 if (*oldval <= now) {
1193 /* Just about to fire. */
1194 *oldval = TICK_NSEC;
1206 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1207 * expiry cache is also used by RLIMIT_CPU!.
1209 if (*newval < *nextevt)
1212 tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
1215 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1216 const struct timespec64 *rqtp)
1218 struct itimerspec64 it;
1219 struct k_itimer timer;
1224 * Set up a temporary timer and then wait for it to go off.
1226 memset(&timer, 0, sizeof timer);
1227 spin_lock_init(&timer.it_lock);
1228 timer.it_clock = which_clock;
1229 timer.it_overrun = -1;
1230 error = posix_cpu_timer_create(&timer);
1231 timer.it_process = current;
1234 static struct itimerspec64 zero_it;
1235 struct restart_block *restart;
1237 memset(&it, 0, sizeof(it));
1238 it.it_value = *rqtp;
1240 spin_lock_irq(&timer.it_lock);
1241 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1243 spin_unlock_irq(&timer.it_lock);
1247 while (!signal_pending(current)) {
1248 if (!cpu_timer_getexpires(&timer.it.cpu)) {
1250 * Our timer fired and was reset, below
1251 * deletion can not fail.
1253 posix_cpu_timer_del(&timer);
1254 spin_unlock_irq(&timer.it_lock);
1259 * Block until cpu_timer_fire (or a signal) wakes us.
1261 __set_current_state(TASK_INTERRUPTIBLE);
1262 spin_unlock_irq(&timer.it_lock);
1264 spin_lock_irq(&timer.it_lock);
1268 * We were interrupted by a signal.
1270 expires = cpu_timer_getexpires(&timer.it.cpu);
1271 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1274 * Timer is now unarmed, deletion can not fail.
1276 posix_cpu_timer_del(&timer);
1278 spin_unlock_irq(&timer.it_lock);
1280 while (error == TIMER_RETRY) {
1282 * We need to handle case when timer was or is in the
1283 * middle of firing. In other cases we already freed
1286 spin_lock_irq(&timer.it_lock);
1287 error = posix_cpu_timer_del(&timer);
1288 spin_unlock_irq(&timer.it_lock);
1291 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1293 * It actually did fire already.
1298 error = -ERESTART_RESTARTBLOCK;
1300 * Report back to the user the time still remaining.
1302 restart = ¤t->restart_block;
1303 restart->nanosleep.expires = expires;
1304 if (restart->nanosleep.type != TT_NONE)
1305 error = nanosleep_copyout(restart, &it.it_value);
1311 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1313 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1314 const struct timespec64 *rqtp)
1316 struct restart_block *restart_block = ¤t->restart_block;
1320 * Diagnose required errors first.
1322 if (CPUCLOCK_PERTHREAD(which_clock) &&
1323 (CPUCLOCK_PID(which_clock) == 0 ||
1324 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1327 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1329 if (error == -ERESTART_RESTARTBLOCK) {
1331 if (flags & TIMER_ABSTIME)
1332 return -ERESTARTNOHAND;
1334 restart_block->fn = posix_cpu_nsleep_restart;
1335 restart_block->nanosleep.clockid = which_clock;
1340 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1342 clockid_t which_clock = restart_block->nanosleep.clockid;
1343 struct timespec64 t;
1345 t = ns_to_timespec64(restart_block->nanosleep.expires);
1347 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1350 #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1351 #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1353 static int process_cpu_clock_getres(const clockid_t which_clock,
1354 struct timespec64 *tp)
1356 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1358 static int process_cpu_clock_get(const clockid_t which_clock,
1359 struct timespec64 *tp)
1361 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1363 static int process_cpu_timer_create(struct k_itimer *timer)
1365 timer->it_clock = PROCESS_CLOCK;
1366 return posix_cpu_timer_create(timer);
1368 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1369 const struct timespec64 *rqtp)
1371 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1373 static int thread_cpu_clock_getres(const clockid_t which_clock,
1374 struct timespec64 *tp)
1376 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1378 static int thread_cpu_clock_get(const clockid_t which_clock,
1379 struct timespec64 *tp)
1381 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1383 static int thread_cpu_timer_create(struct k_itimer *timer)
1385 timer->it_clock = THREAD_CLOCK;
1386 return posix_cpu_timer_create(timer);
1389 const struct k_clock clock_posix_cpu = {
1390 .clock_getres = posix_cpu_clock_getres,
1391 .clock_set = posix_cpu_clock_set,
1392 .clock_get_timespec = posix_cpu_clock_get,
1393 .timer_create = posix_cpu_timer_create,
1394 .nsleep = posix_cpu_nsleep,
1395 .timer_set = posix_cpu_timer_set,
1396 .timer_del = posix_cpu_timer_del,
1397 .timer_get = posix_cpu_timer_get,
1398 .timer_rearm = posix_cpu_timer_rearm,
1401 const struct k_clock clock_process = {
1402 .clock_getres = process_cpu_clock_getres,
1403 .clock_get_timespec = process_cpu_clock_get,
1404 .timer_create = process_cpu_timer_create,
1405 .nsleep = process_cpu_nsleep,
1408 const struct k_clock clock_thread = {
1409 .clock_getres = thread_cpu_clock_getres,
1410 .clock_get_timespec = thread_cpu_clock_get,
1411 .timer_create = thread_cpu_timer_create,