// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to set the CPU timer and update the
 * tsk->signal->cputime_expires expiration cache if necessary.  Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        u64 nsecs = rlim_new * NSEC_PER_SEC;

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}

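/*
 * Editor's note: a CPU clockid encodes both a PID and a clock type.  The
 * CPUCLOCK_PID(), CPUCLOCK_PERTHREAD() and CPUCLOCK_WHICH() helpers used
 * below unpack it; a pid of 0 means "the calling task itself".
 */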
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
        int i;
        u64 delta, incr;

        if (timer->it.cpu.incr == 0)
                return;

        if (now < timer->it.cpu.expires)
                return;

        incr = timer->it.cpu.incr;
        delta = now + incr - timer->it.cpu.expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

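        /*
         * Editor's note, worked example: with incr == 4 and delta == 21
         * the loop above stops at i == 2, incr == 16.  The loop below
         * then adds 16 + 4 == 20 (five intervals) to the expiry time and
         * (1 << 2) + (1 << 0) == 5 to it_overrun, leaving delta == 1, so
         * the new expiry lands strictly after @now.
         */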
        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.expires += incr;
                timer->it_overrun += 1 << i;
                delta -= incr;
        }
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:    The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
        u64 utime, stime;

        task_cputime(p, &utime, &stime);

        return utime + stime;
}

static inline u64 virt_ticks(struct task_struct *p)
{
        u64 utime, stime;

        task_cputime(p, &utime, &stime);

        return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't export its true resolution, but it is
                         * much finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
                            struct task_struct *p, u64 *sample)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                *sample = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                *sample = task_sched_runtime(p);
                break;
        }
        return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime.  Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
        u64 curr_cputime;
retry:
        curr_cputime = atomic64_read(cputime);
        if (sum_cputime > curr_cputime) {
                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                        goto retry;
        }
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
                                         struct task_cputime_atomic *atomic_times)
{
        times->utime = atomic64_read(&atomic_times->utime);
        times->stime = atomic64_read(&atomic_times->stime);
        times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;

        /* Check if cputimer isn't running.  This is accessed without locking. */
        if (!READ_ONCE(cputimer->running)) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime_atomic, &sum);

                /*
                 * We're setting cputimer->running without a lock.  Ensure
                 * this only gets written to in one operation.  We set
                 * running after update_gt_cputime() as a small optimization,
                 * but barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
                WRITE_ONCE(cputimer->running, true);
        }
        sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  u64 *sample)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
                                    const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        int err = -EINVAL;
        u64 rtn;

        if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(tsk, current))
                        err = cpu_clock_sample(which_clock, tsk, &rtn);
        } else {
                if (tsk == current || thread_group_leader(tsk))
                        err = cpu_clock_sample_group(which_clock, tsk, &rtn);
        }

        if (!err)
                *tp = ns_to_timespec64(rtn);

        return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int err = -EINVAL;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                err = posix_cpu_clock_get_task(current, which_clock, tp);
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p)
                        err = posix_cpu_clock_get_task(p, which_clock, tp);
                rcu_read_unlock();
        }

        return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        new_timer->kclock = &clock_posix_cpu;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        int ret = 0;
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Protect against sighand release/switch in exit/exec and process/
         * thread timer list entry concurrent read/writes.
         */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL)) {
                /*
                 * We raced with the reaping of the task.
                 * The deletion should have cleared us off the list.
                 */
                WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
        } else {
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
                        list_del(&timer->it.cpu.entry);

                unlock_task_sighand(p, &flags);
        }

        if (!ret)
                put_task_struct(p);

        return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
        struct cpu_timer_list *timer, *next;

        list_for_each_entry_safe(timer, next, head, entry)
                list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exits.  The timers
 * are only unlinked from the per-task lists here; later timer_gettime()
 * or timer_delete() calls on them will find the task reaped and handle
 * that case.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
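        /* head points at the CPUCLOCK_PROF list; VIRT and SCHED follow in the array. */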
        cleanup_timers_list(head);
        cleanup_timers_list(++head);
        cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
        return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (nt->expires < next->expires)
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                u64 exp = nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * need to update expiration cache. Take into account that
                 * for process timers we share expiration cache with itimers
                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                 */

                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, exp))
                                cputime_expires->prof_exp = exp;
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, exp))
                                cputime_expires->virt_exp = exp;
                        break;
                case CPUCLOCK_SCHED:
                        if (expires_gt(cputime_expires->sched_exp, exp))
                                cputime_expires->sched_exp = exp;
                        break;
                }
                if (CPUCLOCK_PERTHREAD(timer->it_clock))
                        tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
                else
                        tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires = 0;
        } else if (timer->it.cpu.incr == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_rearm(timer);
                ++timer->it_requeue_pending;
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p, u64 *sample)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                *sample = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                *sample = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                *sample = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec64 *new, struct itimerspec64 *old)
{
        unsigned long flags;
        struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
        u64 old_expires, new_expires, old_incr, val;
        int ret;

        WARN_ON_ONCE(p == NULL);

        /*
         * Use the to_ktime conversion because that clamps the maximum
         * value to KTIME_MAX and avoids multiplication overflows.
         */
        new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

        /*
         * Protect against sighand release/switch in exit/exec and p->cpu_timers
         * and p->signal->cpu_timers read/write in arm_timer()
         */
        sighand = lock_task_sighand(p, &flags);
        /*
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(sighand == NULL)) {
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        lockdep_assert_irqs_disabled();

        ret = 0;
        old_incr = timer->it.cpu.incr;
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has overrun already.
                         * If it has, we'll report it as having overrun and
                         * with the next reloaded timer already ticking,
                         * though we are swallowing that pending notification
                         * here to install the new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (val < timer->it.cpu.expires) {
                                old_expires = timer->it.cpu.expires - val;
                                old->it_value = ns_to_timespec64(old_expires);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                unlock_task_sighand(p, &flags);
                goto out;
        }

        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                new_expires += val;
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires != 0 && val < new_expires) {
                arm_timer(timer);
        }

        unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old)
                old->it_interval = ns_to_timespec64(old_incr);

        return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
        u64 now;
        struct task_struct *p = timer->it.cpu.task;

        WARN_ON_ONCE(p == NULL);

        /*
         * Easy part: convert the reload time.
         */
        itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

        if (!timer->it.cpu.expires)
                return;

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
        } else {
                struct sighand_struct *sighand;
                unsigned long flags;

                /*
                 * Protect against sighand release/switch in exit/exec and
                 * also make timer sampling safe if it ends up calling
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Treat the timer as disarmed; nothing else to do.
                         */
                        timer->it.cpu.expires = 0;
                        return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
                }
        }

        if (now < timer->it.cpu.expires) {
                itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

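/*
 * Scan a timer list (kept sorted by expiry) against the current clock
 * sample @curr: move expired entries onto @firing and return the expiry
 * time of the first entry left on the list (0 if none remain).
 */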
static unsigned long long
check_timers_list(struct list_head *timers,
                  struct list_head *firing,
                  unsigned long long curr)
{
        int maxfire = 20;

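        /*
         * Editor's note: fire at most 20 timers per scan so a pathological
         * list cannot stall the tick; whatever is left over is picked up
         * on a later tick, since the returned expiry keeps the expiration
         * cache armed.
         */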
        while (!list_empty(timers)) {
                struct cpu_timer_list *t;

                t = list_first_entry(timers, struct cpu_timer_list, entry);

                if (!--maxfire || curr < t->expires)
                        return t->expires;

                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        return 0;
}

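/*
 * Editor's note: dl.dl_overrun is set by the SCHED_DEADLINE scheduler when
 * a task that requested overrun notification (SCHED_FLAG_DL_OVERRUN) runs
 * past its runtime; the POSIX timer tick turns that flag into a SIGXCPU.
 */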
static inline void check_dl_overrun(struct task_struct *tsk)
{
        if (tsk->dl.dl_overrun) {
                tsk->dl.dl_overrun = 0;
                __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;
        struct task_cputime *tsk_expires = &tsk->cputime_expires;
        u64 expires;
        unsigned long soft;

        if (dl_task(tsk))
                check_dl_overrun(tsk);

        /*
         * If cputime_expires is zero, then there are no active
         * per-thread CPU timers.
         */
        if (task_cputime_zero(&tsk->cputime_expires))
                return;

        expires = check_timers_list(timers, firing, prof_ticks(tsk));
        tsk_expires->prof_exp = expires;

        expires = check_timers_list(++timers, firing, virt_ticks(tsk));
        tsk_expires->virt_exp = expires;

        tsk_expires->sched_exp = check_timers_list(++timers, firing,
                                                   tsk->se.sum_exec_runtime);

        /*
         * Check for the special case thread timers.
         */
        soft = task_rlimit(tsk, RLIMIT_RTTIME);
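        /*
         * Editor's note: tsk->rt.timeout counts scheduler ticks while the
         * RLIMIT_RTTIME limits are in microseconds, hence the
         * DIV_ROUND_UP(limit, USEC_PER_SEC/HZ) conversions below.
         */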
        if (soft != RLIM_INFINITY) {
                unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        if (print_fatal_signals) {
                                pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
                                        tsk->comm, task_pid_nr(tsk));
                        }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
                                        soft;
                        }
                        if (print_fatal_signals) {
                                pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
                                        tsk->comm, task_pid_nr(tsk));
                        }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
        if (task_cputime_zero(tsk_expires))
                tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;

        /* Turn off cputimer->running.  This is done without locking. */
        WRITE_ONCE(cputimer->running, false);
        tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

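/*
 * Check a single process-wide itimer (ITIMER_PROF or ITIMER_VIRTUAL):
 * if it has expired, rearm or clear it and send the associated signal,
 * then fold its next expiry into the caller's expiry tracking.
 */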
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             u64 *expires, u64 cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr)
                        it->expires += it->incr;
                else
                        it->expires = 0;

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires))
                *expires = it->expires;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the sig->cpu_timers[N] lists onto the firing list.  Per-thread
 * timers have already been taken off by check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        u64 utime, ptime, virt_expires, prof_expires;
        u64 sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        if (dl_task(tsk))
                check_dl_overrun(tsk);

        /*
         * If cputimer is not running, then there are no active
         * process-wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
         */
        if (!READ_ONCE(tsk->signal->cputimer.running))
                return;

        /*
         * Signify that a thread is checking for process timers.
         * Write access to this field is protected by the sighand lock.
         */
        sig->cputimer.checking_timer = true;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = utime + cputime.stime;
        sum_sched_runtime = cputime.sum_exec_runtime;

        prof_expires = check_timers_list(timers, firing, ptime);
        virt_expires = check_timers_list(++timers, firing, utime);
        sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = task_rlimit(tsk, RLIMIT_CPU);
        if (soft != RLIM_INFINITY) {
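                /* ptime is in nanoseconds; RLIMIT_CPU limits are in seconds. */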
                unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
                unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
                u64 x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        if (print_fatal_signals) {
                                pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
                                        tsk->comm, task_pid_nr(tsk));
                        }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (print_fatal_signals) {
                                pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
                                        tsk->comm, task_pid_nr(tsk));
                        }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = soft * NSEC_PER_SEC;
                if (!prof_expires || x < prof_expires)
                        prof_expires = x;
        }

        sig->cputime_expires.prof_exp = prof_expires;
        sig->cputime_expires.virt_exp = virt_expires;
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);

        sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
        struct sighand_struct *sighand;
        unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
        u64 now;

        WARN_ON_ONCE(p == NULL);

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
                        return;

                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
                        return;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
                 * thread_group_cputime().
                 */
                sighand = lock_task_sighand(p, &flags);
                if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
                        return;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /* If the process is dying, no need to rearm */
                        goto unlock;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the sighand locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        lockdep_assert_irqs_disabled();
        arm_timer(timer);
unlock:
        unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:     The task_cputime structure to be checked for expiration.
 * @expires:    Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample has reached the corresponding field of
 * @expires, considering only fields of @expires that are nonzero.  Otherwise
 * returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                        const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:        The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample;

                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        /*
         * Check if thread group timers expired when the cputimer is
         * running and no other thread in the group is already checking
         * for thread group cputimers.  These fields are read without the
         * sighand lock.  However, this is fine because this is meant to
         * be a fastpath heuristic to determine whether we should try to
         * acquire the sighand lock to check/handle timers.
         *
         * In the worst case scenario, if 'running' or 'checking_timer' gets
         * set but the current thread doesn't see the change yet, we'll wait
         * until the next thread in the group gets a scheduler interrupt to
         * handle the timer.  This isn't an issue in practice because these
         * types of delays with signals actually getting sent are expected.
         */
        if (READ_ONCE(sig->cputimer.running) &&
            !READ_ONCE(sig->cputimer.checking_timer)) {
                struct task_cputime group_sample;

                sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        if (dl_task(tsk) && tsk->dl.dl_overrun)
                return 1;

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        lockdep_assert_irqs_disabled();

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take all the firing timers off the
         * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
         * and put them on our local firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           u64 *newval, u64 *oldval)
{
        u64 now;

        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);

        if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
                /*
                 * We are setting an itimer.  The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = TICK_NSEC;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the new RLIMIT_CPU limit expires earlier than the cached prof_exp
         * CPU timer.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct itimerspec64 it;
        struct k_itimer timer;
        u64 expires;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof(timer));
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec64 zero_it;
                struct restart_block *restart;

                memset(&it, 0, sizeof(it));
                it.it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                expires = timer.it.cpu.expires;
                error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                if (!error) {
                        /*
                         * The timer is now unarmed; deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case where the timer was or
                         * is in the middle of firing.  In all other cases the
                         * resources were already freed.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
                /*
                 * Report back to the user the time still remaining.
                 */
                restart = &current->restart_block;
                restart->nanosleep.expires = expires;
                if (restart->nanosleep.type != TT_NONE)
                        error = nanosleep_copyout(restart, &it.it_value);
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct restart_block *restart_block = &current->restart_block;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp);

        if (error == -ERESTART_RESTARTBLOCK) {
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
        }
        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec64 t;

        t = ns_to_timespec64(restart_block->nanosleep.expires);

        return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

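/*
 * Clock personalities used for CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID: the caller's own process/thread clock,
 * encoded with pid 0 ("self") and the CPUCLOCK_SCHED which-clock.
 */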
#define PROCESS_CLOCK   make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec64 *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              const struct timespec64 *rqtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec64 *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec64 *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

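/*
 * clock_posix_cpu backs the dynamically encoded CPU clockids (as handed
 * out by clock_getcpuclockid() / pthread_getcpuclockid()); clock_process
 * and clock_thread back the fixed CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID clocks via the wrappers above.
 */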
const struct k_clock clock_posix_cpu = {
        .clock_getres   = posix_cpu_clock_getres,
        .clock_set      = posix_cpu_clock_set,
        .clock_get      = posix_cpu_clock_get,
        .timer_create   = posix_cpu_timer_create,
        .nsleep         = posix_cpu_nsleep,
        .timer_set      = posix_cpu_timer_set,
        .timer_del      = posix_cpu_timer_del,
        .timer_get      = posix_cpu_timer_get,
        .timer_rearm    = posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
        .clock_getres   = process_cpu_clock_getres,
        .clock_get      = process_cpu_clock_get,
        .timer_create   = process_cpu_timer_create,
        .nsleep         = process_cpu_nsleep,
};

const struct k_clock clock_thread = {
        .clock_getres   = thread_cpu_clock_getres,
        .clock_get      = thread_cpu_clock_get,
        .timer_create   = thread_cpu_timer_create,
};