// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
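
/*
 * Note: this runs when RLIMIT_CPU is changed, e.g. via setrlimit() or
 * prlimit(); with rlim_new == 5 it arms (or tightens) a process-wide
 * 5 second CPUCLOCK_PROF limit through the shared expiry cache.
 */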

/*
 * Functions for validating access to tasks.
 */
static struct task_struct *lookup_task(const pid_t pid, bool thread,
				       bool gettime)
{
	struct task_struct *p;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (!pid)
		return thread ? current : current->group_leader;

	p = find_task_by_vpid(pid);
	if (!p)
		return p;

	if (thread)
		return same_thread_group(p, current) ? p : NULL;

	if (gettime) {
		/*
		 * For clock_gettime(PROCESS) the task does not need to be
		 * the actual group leader. tsk->sighand gives
		 * access to the group's clock.
		 *
		 * Timers need the group leader because they take a
		 * reference on it and store the task pointer until the
		 * timer is destroyed.
		 */
		return (p == current || thread_group_leader(p)) ? p : NULL;
	}

	/*
	 * For processes require that p is group leader.
	 */
	return has_group_leader_pid(p) ? p : NULL;
}

static struct task_struct *__get_task_for_clock(const clockid_t clock,
						bool getref, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t pid = CPUCLOCK_PID(clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	rcu_read_lock();
	p = lookup_task(pid, thread, gettime);
	if (p && getref)
		get_task_struct(p);
	rcu_read_unlock();
	return p;
}

static inline struct task_struct *get_task_for_clock(const clockid_t clock)
{
	return __get_task_for_clock(clock, true, false);
}

static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
{
	return __get_task_for_clock(clock, true, true);
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
}
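
/*
 * For reference, a CPU clockid (see the CPUCLOCK_* macros) packs the
 * bitwise-negated PID into the upper bits, the thread/process flag into
 * bit 2 and the clock type (PROF, VIRT or SCHED) into bits 0-1; a PID of
 * 0 means "current". E.g. make_process_cpuclock(0, CPUCLOCK_SCHED), used
 * below, is effectively the clock behind CLOCK_PROCESS_CPUTIME_ID.
 */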

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}
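
/*
 * Worked example: expires = 100ms, it_interval = 10ms, now = 134ms.
 * delta = 134 + 10 - 100 = 44ms and the doubling loop stops at
 * incr = 40ms (i = 2). Walking back down adds 40ms once, leaving
 * delta = 4ms: expires becomes 140ms and it_overrun grows by 1 << 2,
 * matching the four missed expirations at 100, 110, 120 and 130ms in
 * O(log(overruns)) steps rather than one iteration per missed period.
 */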

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}
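
/*
 * The bit trick above: while a base is inactive its nextevt is U64_MAX,
 * so ~nextevt is 0; the OR of the three inverted values is therefore 0
 * exactly when all caches are inactive, and any armed base makes the
 * expression non-zero.
 */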

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime accounting and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);

	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	tsk = get_task_for_clock_get(clock);
	if (!tsk)
		return -EINVAL;

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	put_task_struct(tsk);

	*tp = ns_to_timespec64(t);
	return 0;
}
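
/*
 * This is the clock_gettime() backend for CPU clocks; e.g.
 * clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) arrives here with a
 * per-thread clockid encoding PID 0, i.e. the calling thread.
 */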

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	struct task_struct *p = get_task_for_clock(new_timer->it_clock);

	if (!p)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.task = p;
	return 0;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret = 0;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			cpu_timer_dequeue(ctmr);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;
	struct cpu_timer *ctmr;

	while ((node = timerqueue_getnext(head))) {
		timerqueue_del(head, node);
		ctmr = container_of(node, struct cpu_timer, node);
		ctmr->head = NULL;
	}
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 newexp = cpu_timer_getexpires(ctmr);
	struct task_struct *p = ctmr->task;
	struct posix_cputimer_base *base;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		base = p->posix_cputimers.bases + clkidx;
	else
		base = p->signal->posix_cputimers.bases + clkidx;

	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		cpu_timer_setexpires(ctmr, 0);
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		cpu_timer_setexpires(ctmr, 0);
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		cpu_timer_setexpires(ctmr, 0);
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret = 0;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL))
		return -ESRCH;

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	old_incr = timer->it_interval;
	old_expires = cpu_timer_getexpires(ctmr);

	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else {
		cpu_timer_dequeue(ctmr);
	}

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun already.
			 * If it has, we'll report it as having overrun and
			 * with the next reloaded timer already ticking,
			 * though we are swallowing that pending
			 * notification here to install the new setting.
			 */
			u64 exp = bump_cpu_timer(timer, val);

			if (val < exp) {
				old_expires = exp - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	cpu_timer_setexpires(ctmr, new_expires);
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
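
/*
 * E.g. a relative timer_settime() with it_value = 1ms on a thread
 * CPUCLOCK_PROF timer samples the clock into val, stores val + 1ms as
 * the absolute expiry and arms the timer; an absolute expiry that has
 * already passed fires immediately via cpu_timer_fire() above.
 */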

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 now, expires = cpu_timer_getexpires(ctmr);
	struct task_struct *p = ctmr->task;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, false);

	if (now < expires) {
		itp->it_value = ns_to_timespec64(expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
			      struct list_head *firing, u64 now)
{
	struct timerqueue_node *next;
	int i = 0;

	while ((next = timerqueue_getnext(head))) {
		struct cpu_timer *ctmr;
		u64 expires;

		ctmr = container_of(next, struct cpu_timer, node);
		expires = cpu_timer_getexpires(ctmr);
		/* Limit the number of timers to expire at once */
		if (++i == MAX_COLLECTED || now < expires)
			return expires;

		ctmr->firing = 1;
		cpu_timer_dequeue(ctmr);
		list_add_tail(&ctmr->elist, firing);
	}

	return U64_MAX;
}

static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
				    struct list_head *firing)
{
	struct posix_cputimer_base *base = pct->bases;
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
		base->nextevt = collect_timerqueue(&base->tqhead, firing,
						   samples[i]);
	}
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
	if (time < limit)
		return false;

	if (print_fatal_signals) {
		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
			rt ? "RT" : "CPU", hard ? "hard" : "soft",
			current->comm, task_pid_nr(current));
	}
	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
	return true;
}
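
/*
 * E.g. with RLIMIT_CPU set to {rlim_cur = 2, rlim_max = 4}: after 2s of
 * consumed CPU time the soft-limit check sends SIGXCPU once per second
 * (the callers below bump the soft limit each time), and at 4s the
 * hard-limit check sends SIGKILL.
 */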

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	if (expiry_cache_is_inactive(pct))
		return;

	task_sample_cputime(tsk, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(rttime, hard, SIGKILL, true, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
			soft += USEC_PER_SEC;
			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
		}
	}

	if (expiry_cache_is_inactive(pct))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;

	/* Turn off the active flag. This is done without locking. */
	WRITE_ONCE(pct->timers_active, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && it->expires < *expires)
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->*_timers list onto the firing list. Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	struct posix_cputimers *pct = &sig->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	/*
	 * If there are no active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
	 * processing when there is already another task handling them.
	 */
	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	pct->expiry_active = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
			 &pct->bases[CPUCLOCK_PROF].nextevt,
			 samples[CPUCLOCK_PROF], SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
			 &pct->bases[CPUCLOCK_VIRT].nextevt,
			 samples[CPUCLOCK_VIRT], SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 ptime = samples[CPUCLOCK_PROF];
		u64 softns = (u64)soft * NSEC_PER_SEC;
		u64 hardns = (u64)hard * NSEC_PER_SEC;

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(ptime, hardns, SIGKILL, false, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
			softns += NSEC_PER_SEC;
		}

		/* Update the expiry cache */
		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
			pct->bases[CPUCLOCK_PROF].nextevt = softns;
	}

	if (expiry_cache_is_inactive(pct))
		stop_process_timers(sig);

	pct->expiry_active = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, true);

	bump_cpu_timer(timer, now);

	/* Protect timer list r/w in arm_timer() */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL))
		return;

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		if (samples[i] >= pct->bases[i].nextevt)
			return true;
	}
	return false;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	struct signal_struct *sig;

	if (!expiry_cache_is_inactive(pct)) {
		u64 samples[CPUCLOCK_MAX];

		task_sample_cputime(tsk, samples);
		if (task_cputimers_expired(samples, pct))
			return true;
	}

	sig = tsk->signal;
	pct = &sig->posix_cputimers;
	/*
	 * Check if thread group timers expired when timers are active and
	 * no other thread in the group is already handling expiry for
	 * thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to be
	 * a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to handle timer expiry.
	 *
	 * In the worst case scenario, if concurrently timers_active is set
	 * or expiry_active is cleared, but the current thread doesn't see
	 * the change yet, the timer checks are delayed until the next
	 * thread in the group gets a scheduler interrupt to handle the
	 * timer. This isn't an issue in practice because these types of
	 * delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
		u64 samples[CPUCLOCK_MAX];

		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
					   samples);

		if (task_cputimers_expired(samples, pct))
			return true;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return true;

	return false;
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{
	struct task_struct *tsk = current;
	struct k_itimer *timer, *next;
	unsigned long flags;
	LIST_HEAD(firing);

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers. If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.elist);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}
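
/*
 * Note: run_posix_cpu_timers() is driven from the tick, typically via
 * update_process_times() in the timer interrupt, so expiry latency is
 * bounded by the tick period on the CPU where the task runs.
 */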

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
			   u64 *newval, u64 *oldval)
{
	u64 now, *nextevt;

	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
		return;

	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
	now = cpu_clock_sample_group(clkid, tsk, true);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
	 * expiry cache is also used by RLIMIT_CPU!
	 */
	if (*newval < *nextevt)
		*nextevt = *newval;

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
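
/*
 * Besides update_rlimit_cpu() above, this is used by the setitimer()
 * path for ITIMER_PROF/ITIMER_VIRTUAL, which is why *oldval and *newval
 * are converted between absolute and relative time here.
 */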

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;

	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (!cpu_timer_getexpires(&timer.it.cpu)) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = cpu_timer_getexpires(&timer.it.cpu);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case when the timer was or is
			 * in the middle of firing. In other cases we already
			 * freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}
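
/*
 * This backs clock_nanosleep() on CPU clocks, e.g.
 * clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem) sleeps until
 * the process has consumed req worth of CPU time. Sleeping on your own
 * per-thread clock is rejected in posix_cpu_nsleep() below, since that
 * clock cannot advance while the thread is blocked.
 */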

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres		= posix_cpu_clock_getres,
	.clock_set		= posix_cpu_clock_set,
	.clock_get_timespec	= posix_cpu_clock_get,
	.timer_create		= posix_cpu_timer_create,
	.nsleep			= posix_cpu_nsleep,
	.timer_set		= posix_cpu_timer_set,
	.timer_del		= posix_cpu_timer_del,
	.timer_get		= posix_cpu_timer_get,
	.timer_rearm		= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres		= process_cpu_clock_getres,
	.clock_get_timespec	= process_cpu_clock_get,
	.timer_create		= process_cpu_timer_create,
	.nsleep			= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres		= thread_cpu_clock_getres,
	.clock_get_timespec	= thread_cpu_clock_get,
	.timer_create		= thread_cpu_timer_create,
};