1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * High-resolution kernel timers
9 * In contrast to the low-resolution timeout API, aka timer wheel,
10 * hrtimers provide finer resolution and accuracy depending on system
11 * configuration and capabilities.
13 * Started by: Thomas Gleixner and Ingo Molnar
16 * Based on the original timer wheel code
18 * Help, testing, suggestions, bugfixes, improvements were
21 * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
25 #include <linux/cpu.h>
26 #include <linux/export.h>
27 #include <linux/percpu.h>
28 #include <linux/hrtimer.h>
29 #include <linux/notifier.h>
30 #include <linux/syscalls.h>
31 #include <linux/interrupt.h>
32 #include <linux/tick.h>
33 #include <linux/err.h>
34 #include <linux/debugobjects.h>
35 #include <linux/sched/signal.h>
36 #include <linux/sched/sysctl.h>
37 #include <linux/sched/rt.h>
38 #include <linux/sched/deadline.h>
39 #include <linux/sched/nohz.h>
40 #include <linux/sched/debug.h>
41 #include <linux/timer.h>
42 #include <linux/freezer.h>
43 #include <linux/compat.h>
45 #include <linux/uaccess.h>
47 #include <trace/events/timer.h>
49 #include "tick-internal.h"
52 * Masks for selecting the soft and hard context timers from
55 #define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
56 #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
57 #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
58 #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
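/*
 * Illustrative sketch (added for clarity, not used by the code below):
 * how a clock base index relates to the masks above. The soft bases
 * occupy the upper half of the index space, so a base is "soft" when
 * its bit falls into HRTIMER_ACTIVE_SOFT. The helper name is made up.
 */
static inline bool example_base_index_is_soft(unsigned int index)
{
	return !!((1U << index) & HRTIMER_ACTIVE_SOFT);
}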
63 * There are more clockids than hrtimer bases. Thus, we index
64 * into the timer bases by the hrtimer_base_type enum. When trying
65 * to reach a base using a clockid, hrtimer_clockid_to_base()
66 * is used to convert from clockid to the proper hrtimer_base_type.
68 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
70 .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
74 .index = HRTIMER_BASE_MONOTONIC,
75 .clockid = CLOCK_MONOTONIC,
76 .get_time = &ktime_get,
79 .index = HRTIMER_BASE_REALTIME,
80 .clockid = CLOCK_REALTIME,
81 .get_time = &ktime_get_real,
84 .index = HRTIMER_BASE_BOOTTIME,
85 .clockid = CLOCK_BOOTTIME,
86 .get_time = &ktime_get_boottime,
89 .index = HRTIMER_BASE_TAI,
91 .get_time = &ktime_get_clocktai,
94 .index = HRTIMER_BASE_MONOTONIC_SOFT,
95 .clockid = CLOCK_MONOTONIC,
96 .get_time = &ktime_get,
99 .index = HRTIMER_BASE_REALTIME_SOFT,
100 .clockid = CLOCK_REALTIME,
101 .get_time = &ktime_get_real,
104 .index = HRTIMER_BASE_BOOTTIME_SOFT,
105 .clockid = CLOCK_BOOTTIME,
106 .get_time = &ktime_get_boottime,
109 .index = HRTIMER_BASE_TAI_SOFT,
110 .clockid = CLOCK_TAI,
111 .get_time = &ktime_get_clocktai,
116 static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
117 /* Make sure we catch unsupported clockids */
118 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
120 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
121 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
122 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
123 [CLOCK_TAI] = HRTIMER_BASE_TAI,
127 * Functions and macros which are different for UP/SMP systems are kept in a
133 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
134 * such that hrtimer_callback_running() can unconditionally dereference
135 * timer->base->cpu_base
137 static struct hrtimer_cpu_base migration_cpu_base = {
139 .cpu_base = &migration_cpu_base,
140 .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
141 &migration_cpu_base.lock),
145 #define migration_base migration_cpu_base.clock_base[0]
147 static inline bool is_migration_base(struct hrtimer_clock_base *base)
149 return base == &migration_base;
153 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
154 * means that all timers which are tied to this base via timer->base are
155 * locked, and the base itself is locked too.
157 * So __run_timers/migrate_timers can safely modify all timers which could
158 * be found on the lists/queues.
160 * When the timer's base is locked, and the timer removed from list, it is
161 * possible to set timer->base = &migration_base and drop the lock: the timer
165 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
166 unsigned long *flags)
168 struct hrtimer_clock_base *base;
171 base = READ_ONCE(timer->base);
172 if (likely(base != &migration_base)) {
173 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
174 if (likely(base == timer->base))
176 /* The timer has migrated to another CPU: */
177 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
184 * We do not migrate the timer when it is expiring before the next
185 * event on the target cpu. When high resolution is enabled, we cannot
186 * reprogram the target cpu hardware and we would cause it to fire
187 * late. To keep it simple, we handle the high resolution enabled and
188 * disabled cases the same way.
190 * Called with cpu_base->lock of target cpu held.
193 hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
197 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
198 return expires < new_base->cpu_base->expires_next;
202 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
205 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
206 if (static_branch_likely(&timers_migration_enabled) && !pinned)
207 return &per_cpu(hrtimer_bases, get_nohz_timer_target());
213 * We switch the timer base to a power-optimized selected CPU target,
215 * - NO_HZ_COMMON is enabled
216 * - timer migration is enabled
217 * - the timer callback is not running
218 * - the timer is not the first expiring timer on the new target
220 * If one of the above requirements is not fulfilled we move the timer
221 * to the current CPU or leave it on the previously assigned CPU if
222 * the timer callback is currently running.
224 static inline struct hrtimer_clock_base *
225 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
228 struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
229 struct hrtimer_clock_base *new_base;
230 int basenum = base->index;
232 this_cpu_base = this_cpu_ptr(&hrtimer_bases);
233 new_cpu_base = get_target_base(this_cpu_base, pinned);
235 new_base = &new_cpu_base->clock_base[basenum];
237 if (base != new_base) {
239 * We are trying to move timer to new_base.
240 * However we can't change timer's base while it is running,
241 * so we keep it on the same CPU. No hassle vs. reprogramming
242 * the event source in the high resolution case. The softirq
243 * code will take care of this when the timer function has
244 * completed. There is no conflict as we hold the lock until
245 * the timer is enqueued.
247 if (unlikely(hrtimer_callback_running(timer)))
250 /* See the comment in lock_hrtimer_base() */
251 WRITE_ONCE(timer->base, &migration_base);
252 raw_spin_unlock(&base->cpu_base->lock);
253 raw_spin_lock(&new_base->cpu_base->lock);
255 if (new_cpu_base != this_cpu_base &&
256 hrtimer_check_target(timer, new_base)) {
257 raw_spin_unlock(&new_base->cpu_base->lock);
258 raw_spin_lock(&base->cpu_base->lock);
259 new_cpu_base = this_cpu_base;
260 WRITE_ONCE(timer->base, base);
263 WRITE_ONCE(timer->base, new_base);
265 if (new_cpu_base != this_cpu_base &&
266 hrtimer_check_target(timer, new_base)) {
267 new_cpu_base = this_cpu_base;
274 #else /* CONFIG_SMP */
276 static inline bool is_migration_base(struct hrtimer_clock_base *base)
281 static inline struct hrtimer_clock_base *
282 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
284 struct hrtimer_clock_base *base = timer->base;
286 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
291 # define switch_hrtimer_base(t, b, p) (b)
293 #endif /* !CONFIG_SMP */
296 * Functions for the union type storage format of ktime_t which are
297 * too large for inlining:
299 #if BITS_PER_LONG < 64
301 * Divide a ktime value by a nanosecond value
303 s64 __ktime_divns(const ktime_t kt, s64 div)
309 dclc = ktime_to_ns(kt);
310 tmp = dclc < 0 ? -dclc : dclc;
312 /* Make sure the divisor is less than 2^32: */
318 do_div(tmp, (u32) div);
319 return dclc < 0 ? -tmp : tmp;
321 EXPORT_SYMBOL_GPL(__ktime_divns);
322 #endif /* BITS_PER_LONG < 64 */
325 * Add two ktime values and do a safety check for overflow:
327 ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
329 ktime_t res = ktime_add_unsafe(lhs, rhs);
332 * We use KTIME_SEC_MAX here, the maximum timeout which we can
333 * return to user space in a timespec:
335 if (res < 0 || res < lhs || res < rhs)
336 res = ktime_set(KTIME_SEC_MAX, 0);
341 EXPORT_SYMBOL_GPL(ktime_add_safe);
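/*
 * Usage sketch (illustrative only, not part of this file's logic):
 * turning a user supplied relative timeout into an absolute expiry
 * without risking s64 overflow. ktime_get() and ktime_add_safe() are
 * the real interfaces; the helper name is made up.
 */
static inline ktime_t example_abs_expiry(ktime_t rel_timeout)
{
	/* On overflow the result is clamped to KTIME_SEC_MAX seconds */
	return ktime_add_safe(ktime_get(), rel_timeout);
}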
343 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
345 static const struct debug_obj_descr hrtimer_debug_descr;
347 static void *hrtimer_debug_hint(void *addr)
349 return ((struct hrtimer *) addr)->function;
353 * fixup_init is called when:
354 * - an active object is initialized
356 static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
358 struct hrtimer *timer = addr;
361 case ODEBUG_STATE_ACTIVE:
362 hrtimer_cancel(timer);
363 debug_object_init(timer, &hrtimer_debug_descr);
371 * fixup_activate is called when:
372 * - an active object is activated
373 * - an unknown non-static object is activated
375 static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
378 case ODEBUG_STATE_ACTIVE:
387 * fixup_free is called when:
388 * - an active object is freed
390 static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
392 struct hrtimer *timer = addr;
395 case ODEBUG_STATE_ACTIVE:
396 hrtimer_cancel(timer);
397 debug_object_free(timer, &hrtimer_debug_descr);
404 static const struct debug_obj_descr hrtimer_debug_descr = {
406 .debug_hint = hrtimer_debug_hint,
407 .fixup_init = hrtimer_fixup_init,
408 .fixup_activate = hrtimer_fixup_activate,
409 .fixup_free = hrtimer_fixup_free,
412 static inline void debug_hrtimer_init(struct hrtimer *timer)
414 debug_object_init(timer, &hrtimer_debug_descr);
417 static inline void debug_hrtimer_activate(struct hrtimer *timer,
418 enum hrtimer_mode mode)
420 debug_object_activate(timer, &hrtimer_debug_descr);
423 static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
425 debug_object_deactivate(timer, &hrtimer_debug_descr);
428 static inline void debug_hrtimer_free(struct hrtimer *timer)
430 debug_object_free(timer, &hrtimer_debug_descr);
433 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
434 enum hrtimer_mode mode);
436 void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
437 enum hrtimer_mode mode)
439 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
440 __hrtimer_init(timer, clock_id, mode);
442 EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
444 static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
445 clockid_t clock_id, enum hrtimer_mode mode);
447 void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
448 clockid_t clock_id, enum hrtimer_mode mode)
450 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
451 __hrtimer_init_sleeper(sl, clock_id, mode);
453 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
455 void destroy_hrtimer_on_stack(struct hrtimer *timer)
457 debug_object_free(timer, &hrtimer_debug_descr);
459 EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
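/*
 * Lifecycle sketch for on-stack hrtimers (illustrative only): the
 * debugobjects annotations above require that every on-stack timer is
 * torn down with destroy_hrtimer_on_stack() before its stack frame
 * disappears. The callback and function names below are made up.
 */
static enum hrtimer_restart example_stack_timer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void example_on_stack_timer(void)
{
	struct hrtimer t;

	hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = example_stack_timer_fn;
	hrtimer_start(&t, ms_to_ktime(10), HRTIMER_MODE_REL);
	/* ... do work, then make sure the timer cannot fire anymore ... */
	hrtimer_cancel(&t);
	destroy_hrtimer_on_stack(&t);
}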
463 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
464 static inline void debug_hrtimer_activate(struct hrtimer *timer,
465 enum hrtimer_mode mode) { }
466 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
470 debug_init(struct hrtimer *timer, clockid_t clockid,
471 enum hrtimer_mode mode)
473 debug_hrtimer_init(timer);
474 trace_hrtimer_init(timer, clockid, mode);
477 static inline void debug_activate(struct hrtimer *timer,
478 enum hrtimer_mode mode)
480 debug_hrtimer_activate(timer, mode);
481 trace_hrtimer_start(timer, mode);
484 static inline void debug_deactivate(struct hrtimer *timer)
486 debug_hrtimer_deactivate(timer);
487 trace_hrtimer_cancel(timer);
490 static struct hrtimer_clock_base *
491 __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
498 idx = __ffs(*active);
499 *active &= ~(1U << idx);
501 return &cpu_base->clock_base[idx];
504 #define for_each_active_base(base, cpu_base, active) \
505 while ((base = __next_base((cpu_base), &(active))))
507 static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
508 const struct hrtimer *exclude,
510 ktime_t expires_next)
512 struct hrtimer_clock_base *base;
515 for_each_active_base(base, cpu_base, active) {
516 struct timerqueue_node *next;
517 struct hrtimer *timer;
519 next = timerqueue_getnext(&base->active);
520 timer = container_of(next, struct hrtimer, node);
521 if (timer == exclude) {
522 /* Get to the next timer in the queue. */
523 next = timerqueue_iterate_next(next);
527 timer = container_of(next, struct hrtimer, node);
529 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
530 if (expires < expires_next) {
531 expires_next = expires;
533 /* Skip cpu_base update if a timer is being excluded. */
538 cpu_base->softirq_next_timer = timer;
540 cpu_base->next_timer = timer;
544 * clock_was_set() might have changed base->offset of any of
545 * the clock bases so the result might be negative. Fix it up
546 * to prevent a false positive in clockevents_program_event().
548 if (expires_next < 0)
554 * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
555 * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
557 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
558 * those timers will get run whenever the softirq gets handled, at the end of
559 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
561 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
562 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
563 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
565 * @active_mask must be one of:
566 * - HRTIMER_ACTIVE_ALL,
567 * - HRTIMER_ACTIVE_SOFT, or
568 * - HRTIMER_ACTIVE_HARD.
571 __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
574 struct hrtimer *next_timer = NULL;
575 ktime_t expires_next = KTIME_MAX;
577 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
578 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
579 cpu_base->softirq_next_timer = NULL;
580 expires_next = __hrtimer_next_event_base(cpu_base, NULL,
583 next_timer = cpu_base->softirq_next_timer;
586 if (active_mask & HRTIMER_ACTIVE_HARD) {
587 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
588 cpu_base->next_timer = next_timer;
589 expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
596 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
598 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
599 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
600 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
602 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
603 offs_real, offs_boot, offs_tai);
605 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
606 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
607 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
613 * Is the high resolution mode active?
615 static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
617 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
618 cpu_base->hres_active : 0;
621 static inline int hrtimer_hres_active(void)
623 return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
627 * Reprogram the event source with checking both queues for the
629 * Called with interrupts disabled and base->lock held
632 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
634 ktime_t expires_next;
637 * Find the current next expiration time.
639 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
641 if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
643 * When the softirq is activated, hrtimer has to be
644 * programmed with the first hard hrtimer because soft
645 * timer interrupt could occur too late.
647 if (cpu_base->softirq_activated)
648 expires_next = __hrtimer_get_next_event(cpu_base,
649 HRTIMER_ACTIVE_HARD);
651 cpu_base->softirq_expires_next = expires_next;
654 if (skip_equal && expires_next == cpu_base->expires_next)
657 cpu_base->expires_next = expires_next;
660 * If hres is not active, hardware does not have to be
663 * If a hang was detected in the last timer interrupt then we
664 * leave the hang delay active in the hardware. We want the
665 * system to make progress. That also prevents the following
667 * T1 expires 50ms from now
668 * T2 expires 5s from now
670 * T1 is removed, so this code is called and would reprogram
671 * the hardware to 5s from now. Any hrtimer_start after that
672 * will not reprogram the hardware due to hang_detected being
673 * set. So we'd effectively block all timers until the T2 event
676 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
679 tick_program_event(cpu_base->expires_next, 1);
682 /* High resolution timer related functions */
683 #ifdef CONFIG_HIGH_RES_TIMERS
686 * High resolution timer enabled?
688 static bool hrtimer_hres_enabled __read_mostly = true;
689 unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
690 EXPORT_SYMBOL_GPL(hrtimer_resolution);
693 * Enable / Disable high resolution mode
695 static int __init setup_hrtimer_hres(char *str)
697 return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
700 __setup("highres=", setup_hrtimer_hres);
703 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
705 static inline int hrtimer_is_hres_enabled(void)
707 return hrtimer_hres_enabled;
711 * Retrigger next event is called after clock was set
713 * Called with interrupts disabled via on_each_cpu()
715 static void retrigger_next_event(void *arg)
717 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
719 if (!__hrtimer_hres_active(base))
722 raw_spin_lock(&base->lock);
723 hrtimer_update_base(base);
724 hrtimer_force_reprogram(base, 0);
725 raw_spin_unlock(&base->lock);
729 * Switch to high resolution mode
731 static void hrtimer_switch_to_hres(void)
733 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
735 if (tick_init_highres()) {
736 pr_warn("Could not switch to high resolution mode on CPU %u\n",
740 base->hres_active = 1;
741 hrtimer_resolution = HIGH_RES_NSEC;
743 tick_setup_sched_timer();
744 /* "Retrigger" the interrupt to get things going */
745 retrigger_next_event(NULL);
748 static void clock_was_set_work(struct work_struct *work)
753 static DECLARE_WORK(hrtimer_work, clock_was_set_work);
756 * Called from timekeeping and resume code to reprogram the hrtimer
757 * interrupt device on all cpus.
759 void clock_was_set_delayed(void)
761 schedule_work(&hrtimer_work);
766 static inline int hrtimer_is_hres_enabled(void) { return 0; }
767 static inline void hrtimer_switch_to_hres(void) { }
768 static inline void retrigger_next_event(void *arg) { }
770 #endif /* CONFIG_HIGH_RES_TIMERS */
773 * When a timer is enqueued and expires earlier than the already enqueued
774 * timers, we have to check whether it expires earlier than the timer for
775 * which the clock event device was armed.
777 * Called with interrupts disabled and base->cpu_base.lock held
779 static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
781 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
782 struct hrtimer_clock_base *base = timer->base;
783 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
785 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
788 * CLOCK_REALTIME timer might be requested with an absolute
789 * expiry time which is less than base->offset. Set it to 0.
794 if (timer->is_soft) {
796 * soft hrtimer could be started on a remote CPU. In this
797 * case softirq_expires_next needs to be updated on the
798 * remote CPU. The soft hrtimer will not expire before the
799 * first hard hrtimer on the remote CPU -
800 * hrtimer_check_target() prevents this case.
802 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
804 if (timer_cpu_base->softirq_activated)
807 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
810 timer_cpu_base->softirq_next_timer = timer;
811 timer_cpu_base->softirq_expires_next = expires;
813 if (!ktime_before(expires, timer_cpu_base->expires_next) ||
819 * If the timer is not on the current cpu, we cannot reprogram
820 * the other cpus clock event device.
822 if (base->cpu_base != cpu_base)
826 * If the hrtimer interrupt is running, then it will
827 * reevaluate the clock bases and reprogram the clock event
828 * device. The callbacks are always executed in hard interrupt
829 * context so we don't need an extra check for a running
832 if (cpu_base->in_hrtirq)
835 if (expires >= cpu_base->expires_next)
838 /* Update the pointer to the next expiring timer */
839 cpu_base->next_timer = timer;
840 cpu_base->expires_next = expires;
843 * If hres is not active, hardware does not have to be
846 * If a hang was detected in the last timer interrupt then we
847 * do not schedule a timer which is earlier than the expiry
848 * which we enforced in the hang detection. We want the system
851 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
855 * Program the timer hardware. We enforce the expiry for
856 * events which are already in the past.
858 tick_program_event(expires, 1);
862 * Clock realtime was set
864 * Change the offset of the realtime clock vs. the monotonic
867 * We might have to reprogram the high resolution timer interrupt. On
868 * SMP we call the architecture specific code to retrigger _all_ high
869 * resolution timer interrupts. On UP we just disable interrupts and
870 * call the high resolution interrupt code.
872 void clock_was_set(void)
874 #ifdef CONFIG_HIGH_RES_TIMERS
875 /* Retrigger the CPU local events everywhere */
876 on_each_cpu(retrigger_next_event, NULL, 1);
878 timerfd_clock_was_set();
882 * During resume we might have to reprogram the high resolution timer
883 * interrupt on all online CPUs. However, all other CPUs will be
884 * stopped with interrupts disabled so the clock_was_set() call
887 void hrtimers_resume(void)
889 lockdep_assert_irqs_disabled();
890 /* Retrigger on the local CPU */
891 retrigger_next_event(NULL);
892 /* And schedule a retrigger for all others */
893 clock_was_set_delayed();
897 * Counterpart to lock_hrtimer_base above:
900 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
902 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
906 * hrtimer_forward - forward the timer expiry
907 * @timer: hrtimer to forward
908 * @now: forward past this time
909 * @interval: the interval to forward
911 * Forward the timer expiry so it will expire in the future.
912 * Returns the number of overruns.
914 * Can be safely called from the callback function of @timer. If
915 * called from other contexts @timer must neither be enqueued nor
916 * running the callback and the caller needs to take care of
919 * Note: This only updates the timer expiry value and does not requeue
922 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
927 delta = ktime_sub(now, hrtimer_get_expires(timer));
932 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
935 if (interval < hrtimer_resolution)
936 interval = hrtimer_resolution;
938 if (unlikely(delta >= interval)) {
939 s64 incr = ktime_to_ns(interval);
941 orun = ktime_divns(delta, incr);
942 hrtimer_add_expires_ns(timer, incr * orun);
943 if (hrtimer_get_expires_tv64(timer) > now)
946 * This (and the ktime_add() below) is the
947 * correction for exact:
951 hrtimer_add_expires(timer, interval);
955 EXPORT_SYMBOL_GPL(hrtimer_forward);
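/*
 * Usage sketch (illustrative only): a self re-arming periodic callback.
 * hrtimer_forward() advances the expiry in whole interval steps past
 * "now", which avoids drift when the callback runs late. The callback
 * name and the 100ms period are made up; for a CLOCK_MONOTONIC timer
 * ktime_get() is the matching "now".
 */
static enum hrtimer_restart example_periodic_fn(struct hrtimer *timer)
{
	hrtimer_forward(timer, ktime_get(), ms_to_ktime(100));
	return HRTIMER_RESTART;
}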
958 * enqueue_hrtimer - internal function to (re)start a timer
960 * The timer is inserted in expiry order. Insertion into the
961 * red black tree is O(log(n)). Must hold the base lock.
963 * Returns 1 when the new timer is the leftmost timer in the tree.
965 static int enqueue_hrtimer(struct hrtimer *timer,
966 struct hrtimer_clock_base *base,
967 enum hrtimer_mode mode)
969 debug_activate(timer, mode);
971 base->cpu_base->active_bases |= 1 << base->index;
973 /* Pairs with the lockless read in hrtimer_is_queued() */
974 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
976 return timerqueue_add(&base->active, &timer->node);
980 * __remove_hrtimer - internal function to remove a timer
982 * Caller must hold the base lock.
984 * High resolution timer mode reprograms the clock event device when the
985 * timer is the one which expires next. The caller can disable this by setting
986 * reprogram to zero. This is useful, when the context does a reprogramming
987 * anyway (e.g. timer interrupt)
989 static void __remove_hrtimer(struct hrtimer *timer,
990 struct hrtimer_clock_base *base,
991 u8 newstate, int reprogram)
993 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
994 u8 state = timer->state;
996 /* Pairs with the lockless read in hrtimer_is_queued() */
997 WRITE_ONCE(timer->state, newstate);
998 if (!(state & HRTIMER_STATE_ENQUEUED))
1001 if (!timerqueue_del(&base->active, &timer->node))
1002 cpu_base->active_bases &= ~(1 << base->index);
1005 * Note: If reprogram is false we do not update
1006 * cpu_base->next_timer. This happens when we remove the first
1007 * timer on a remote cpu. No harm as we never dereference
1008 * cpu_base->next_timer. So the worst that can happen is
1009 * a superfluous call to hrtimer_force_reprogram() on the
1010 * remote cpu later on if the same timer gets enqueued again.
1012 if (reprogram && timer == cpu_base->next_timer)
1013 hrtimer_force_reprogram(cpu_base, 1);
1017 * remove hrtimer, called with base lock held
1020 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
1022 u8 state = timer->state;
1024 if (state & HRTIMER_STATE_ENQUEUED) {
1028 * Remove the timer and force reprogramming when high
1029 * resolution mode is active and the timer is on the current
1030 * CPU. If we remove a timer on another CPU, reprogramming is
1031 * skipped. The interrupt event on this CPU is fired and
1032 * reprogramming happens in the interrupt handler. This is a
1033 * rare case and less expensive than a smp call.
1035 debug_deactivate(timer);
1036 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1039 state = HRTIMER_STATE_INACTIVE;
1041 __remove_hrtimer(timer, base, state, reprogram);
1047 static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1048 const enum hrtimer_mode mode)
1050 #ifdef CONFIG_TIME_LOW_RES
1052 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
1053 * granular time values. For relative timers we add hrtimer_resolution
1054 * (i.e. one jiffy) to prevent short timeouts.
1056 timer->is_rel = mode & HRTIMER_MODE_REL;
1058 tim = ktime_add_safe(tim, hrtimer_resolution);
1064 hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
1069 * Find the next SOFT expiration.
1071 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
1074 * reprogramming needs to be triggered, even if the next soft
1075 * hrtimer expires at the same time as the next hard
1076 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
1078 if (expires == KTIME_MAX)
1082 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
1083 * cpu_base->*expires_next is only set by hrtimer_reprogram()
1085 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1088 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1089 u64 delta_ns, const enum hrtimer_mode mode,
1090 struct hrtimer_clock_base *base)
1092 struct hrtimer_clock_base *new_base;
1094 /* Remove an active timer from the queue: */
1095 remove_hrtimer(timer, base, true);
1097 if (mode & HRTIMER_MODE_REL)
1098 tim = ktime_add_safe(tim, base->get_time());
1100 tim = hrtimer_update_lowres(timer, tim, mode);
1102 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1104 /* Switch the timer base, if necessary: */
1105 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
1107 return enqueue_hrtimer(timer, new_base, mode);
1111 * hrtimer_start_range_ns - (re)start an hrtimer
1112 * @timer: the timer to be added
1114 * @delta_ns: "slack" range for the timer
1115 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1116 * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
1117 * softirq based mode is considered for debug purpose only!
1119 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1120 u64 delta_ns, const enum hrtimer_mode mode)
1122 struct hrtimer_clock_base *base;
1123 unsigned long flags;
1126 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
1127 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
1128 * expiry mode because unmarked timers are moved to softirq expiry.
1130 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1131 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1133 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1135 base = lock_hrtimer_base(timer, &flags);
1137 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1138 hrtimer_reprogram(timer, true);
1140 unlock_hrtimer_base(timer, &flags);
1142 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
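/*
 * Usage sketch (illustrative only): arm an already initialized timer
 * 2ms from now and allow up to 500us of slack so the expiry can be
 * coalesced with other wakeups. The helper name is made up.
 */
static void example_start_with_slack(struct hrtimer *timer)
{
	hrtimer_start_range_ns(timer, ms_to_ktime(2), 500 * NSEC_PER_USEC,
			       HRTIMER_MODE_REL);
}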
1145 * hrtimer_try_to_cancel - try to deactivate a timer
1146 * @timer: hrtimer to stop
1150 * * 0 when the timer was not active
1151 * * 1 when the timer was active
1152 * * -1 when the timer is currently executing the callback function and
1155 int hrtimer_try_to_cancel(struct hrtimer *timer)
1157 struct hrtimer_clock_base *base;
1158 unsigned long flags;
1162 * Check lockless first. If the timer is not active (neither
1163 * enqueued nor running the callback), nothing to do here. The
1164 * base lock does not serialize against a concurrent enqueue,
1165 * so we can avoid taking it.
1167 if (!hrtimer_active(timer))
1170 base = lock_hrtimer_base(timer, &flags);
1172 if (!hrtimer_callback_running(timer))
1173 ret = remove_hrtimer(timer, base, false);
1175 unlock_hrtimer_base(timer, &flags);
1180 EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1182 #ifdef CONFIG_PREEMPT_RT
1183 static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
1185 spin_lock_init(&base->softirq_expiry_lock);
1188 static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
1190 spin_lock(&base->softirq_expiry_lock);
1193 static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
1195 spin_unlock(&base->softirq_expiry_lock);
1199 * The counterpart to hrtimer_cancel_wait_running().
1201 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1202 * the timer callback to finish. Drop expiry_lock and reacquire it. That
1203 * allows the waiter to acquire the lock and make progress.
1205 static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
1206 unsigned long flags)
1208 if (atomic_read(&cpu_base->timer_waiters)) {
1209 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1210 spin_unlock(&cpu_base->softirq_expiry_lock);
1211 spin_lock(&cpu_base->softirq_expiry_lock);
1212 raw_spin_lock_irq(&cpu_base->lock);
1217 * This function is called on PREEMPT_RT kernels when the fast path
1218 * deletion of a timer failed because the timer callback function was
1221 * This prevents priority inversion: if the soft irq thread is preempted
1222 * in the middle of a timer callback, then calling del_timer_sync() can
1223 * lead to two issues:
1225 * - If the caller is on a remote CPU then it has to spin wait for the timer
1226 * handler to complete. This can result in unbounded priority inversion.
1228 * - If the caller originates from the task which preempted the timer
1229 * handler on the same CPU, then spin waiting for the timer handler to
1230 * complete is never going to end.
1232 void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1234 /* Lockless read. Prevent the compiler from reloading it below */
1235 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1238 * Just relax if the timer expires in hard interrupt context or if
1239 * it is currently on the migration base.
1241 if (!timer->is_soft || is_migration_base(base)) {
1247 * Mark the base as contended and grab the expiry lock, which is
1248 * held by the softirq across the timer callback. Drop the lock
1249 * immediately so the softirq can expire the next timer. In theory
1250 * the timer could already be running again, but that's more than
1251 * unlikely and just causes another wait loop.
1253 atomic_inc(&base->cpu_base->timer_waiters);
1254 spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1255 atomic_dec(&base->cpu_base->timer_waiters);
1256 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1260 hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
1262 hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
1264 hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
1265 static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
1266 unsigned long flags) { }
1270 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1271 * @timer: the timer to be cancelled
1274 * 0 when the timer was not active
1275 * 1 when the timer was active
1277 int hrtimer_cancel(struct hrtimer *timer)
1282 ret = hrtimer_try_to_cancel(timer);
1285 hrtimer_cancel_wait_running(timer);
1289 EXPORT_SYMBOL_GPL(hrtimer_cancel);
1292 * hrtimer_get_remaining - get remaining time for the timer
1293 * @timer: the timer to read
1294 * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
1296 ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1298 unsigned long flags;
1301 lock_hrtimer_base(timer, &flags);
1302 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1303 rem = hrtimer_expires_remaining_adjusted(timer);
1305 rem = hrtimer_expires_remaining(timer);
1306 unlock_hrtimer_base(timer, &flags);
1310 EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
1312 #ifdef CONFIG_NO_HZ_COMMON
1314 * hrtimer_get_next_event - get the time until next expiry event
1316 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1318 u64 hrtimer_get_next_event(void)
1320 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1321 u64 expires = KTIME_MAX;
1322 unsigned long flags;
1324 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1326 if (!__hrtimer_hres_active(cpu_base))
1327 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1329 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1335 * hrtimer_next_event_without - time until next expiry event w/o one timer
1336 * @exclude: timer to exclude
1338 * Returns the next expiry time over all timers except for the @exclude one or
1339 * KTIME_MAX if none of them is pending.
1341 u64 hrtimer_next_event_without(const struct hrtimer *exclude)
1343 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1344 u64 expires = KTIME_MAX;
1345 unsigned long flags;
1347 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1349 if (__hrtimer_hres_active(cpu_base)) {
1350 unsigned int active;
1352 if (!cpu_base->softirq_activated) {
1353 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1354 expires = __hrtimer_next_event_base(cpu_base, exclude,
1357 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1358 expires = __hrtimer_next_event_base(cpu_base, exclude, active,
1362 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1368 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1370 if (likely(clock_id < MAX_CLOCKS)) {
1371 int base = hrtimer_clock_to_base_table[clock_id];
1373 if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1376 WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1377 return HRTIMER_BASE_MONOTONIC;
1380 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1381 enum hrtimer_mode mode)
1383 bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
1384 struct hrtimer_cpu_base *cpu_base;
1388 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
1389 * marked for hard interrupt expiry mode are moved into soft
1390 * interrupt context for latency reasons and because the callbacks
1391 * can invoke functions which might sleep on RT, e.g. spin_lock().
1393 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
1396 memset(timer, 0, sizeof(struct hrtimer));
1398 cpu_base = raw_cpu_ptr(&hrtimer_bases);
1401 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
1402 * clock modifications, so they need to become CLOCK_MONOTONIC to
1403 * ensure POSIX compliance.
1405 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1406 clock_id = CLOCK_MONOTONIC;
1408 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
1409 base += hrtimer_clockid_to_base(clock_id);
1410 timer->is_soft = softtimer;
1411 timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1412 timer->base = &cpu_base->clock_base[base];
1413 timerqueue_init(&timer->node);
1417 * hrtimer_init - initialize a timer to the given clock
1418 * @timer: the timer to be initialized
1419 * @clock_id: the clock to be used
1420 * @mode: The modes which are relevant for initialization:
1421 * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
1422 * HRTIMER_MODE_REL_SOFT
1424 * The PINNED variants of the above can be handed in,
1425 * but the PINNED bit is ignored as pinning happens
1426 * when the hrtimer is started
1428 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1429 enum hrtimer_mode mode)
1431 debug_init(timer, clock_id, mode);
1432 __hrtimer_init(timer, clock_id, mode);
1434 EXPORT_SYMBOL_GPL(hrtimer_init);
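/*
 * Usage sketch (illustrative only): the usual init sequence for an
 * embedded hrtimer. Everything named "example_*" is made up; only
 * hrtimer_init(), container_of() and the mode/clock constants are the
 * real interfaces.
 */
struct example_dev {
	struct hrtimer watchdog;
};

static enum hrtimer_restart example_watchdog_fn(struct hrtimer *timer)
{
	struct example_dev *dev = container_of(timer, struct example_dev, watchdog);

	pr_debug("watchdog expired for %p\n", dev);
	return HRTIMER_NORESTART;
}

static void example_dev_setup(struct example_dev *dev)
{
	hrtimer_init(&dev->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->watchdog.function = example_watchdog_fn;
}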
1437 * A timer is active, when it is enqueued into the rbtree or the
1438 * callback function is running or it's in the state of being migrated
1441 * It is important for this function to not return a false negative.
1443 bool hrtimer_active(const struct hrtimer *timer)
1445 struct hrtimer_clock_base *base;
1449 base = READ_ONCE(timer->base);
1450 seq = raw_read_seqcount_begin(&base->seq);
1452 if (timer->state != HRTIMER_STATE_INACTIVE ||
1453 base->running == timer)
1456 } while (read_seqcount_retry(&base->seq, seq) ||
1457 base != READ_ONCE(timer->base));
1461 EXPORT_SYMBOL_GPL(hrtimer_active);
1464 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
1465 * distinct sections:
1467 * - queued: the timer is queued
1468 * - callback: the timer is being run
1469 * - post: the timer is inactive or (re)queued
1471 * On the read side we ensure we observe timer->state and cpu_base->running
1472 * from the same section, if anything changed while we looked at it, we retry.
1473 * This includes timer->base changing because sequence numbers alone are
1474 * insufficient for that.
1476 * The sequence numbers are required because otherwise we could still observe
1477 * a false negative if the read side got smeared over multiple consecutive
1478 * __run_hrtimer() invocations.
1481 static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1482 struct hrtimer_clock_base *base,
1483 struct hrtimer *timer, ktime_t *now,
1484 unsigned long flags) __must_hold(&cpu_base->lock)
1486 enum hrtimer_restart (*fn)(struct hrtimer *);
1487 bool expires_in_hardirq;
1490 lockdep_assert_held(&cpu_base->lock);
1492 debug_deactivate(timer);
1493 base->running = timer;
1496 * Separate the ->running assignment from the ->state assignment.
1498 * As with a regular write barrier, this ensures the read side in
1499 * hrtimer_active() cannot observe base->running == NULL &&
1500 * timer->state == INACTIVE.
1502 raw_write_seqcount_barrier(&base->seq);
1504 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1505 fn = timer->function;
1508 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
1509 * timer is restarted with a period then it becomes an absolute
1510 * timer. If it's not restarted, it does not matter.
1512 if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1513 timer->is_rel = false;
1516 * The timer is marked as running in the CPU base, so it is
1517 * protected against migration to a different CPU even if the lock
1520 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1521 trace_hrtimer_expire_entry(timer, now);
1522 expires_in_hardirq = lockdep_hrtimer_enter(timer);
1524 restart = fn(timer);
1526 lockdep_hrtimer_exit(expires_in_hardirq);
1527 trace_hrtimer_expire_exit(timer);
1528 raw_spin_lock_irq(&cpu_base->lock);
1531 * Note: We clear the running state after enqueue_hrtimer and
1532 * we do not reprogram the event hardware. Happens either in
1533 * hrtimer_start_range_ns() or in hrtimer_interrupt()
1535 * Note: Because we dropped the cpu_base->lock above,
1536 * hrtimer_start_range_ns() can have popped in and enqueued the timer
1539 if (restart != HRTIMER_NORESTART &&
1540 !(timer->state & HRTIMER_STATE_ENQUEUED))
1541 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1544 * Separate the ->running assignment from the ->state assignment.
1546 * As with a regular write barrier, this ensures the read side in
1547 * hrtimer_active() cannot observe base->running.timer == NULL &&
1548 * timer->state == INACTIVE.
1550 raw_write_seqcount_barrier(&base->seq);
1552 WARN_ON_ONCE(base->running != timer);
1553 base->running = NULL;
1556 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1557 unsigned long flags, unsigned int active_mask)
1559 struct hrtimer_clock_base *base;
1560 unsigned int active = cpu_base->active_bases & active_mask;
1562 for_each_active_base(base, cpu_base, active) {
1563 struct timerqueue_node *node;
1566 basenow = ktime_add(now, base->offset);
1568 while ((node = timerqueue_getnext(&base->active))) {
1569 struct hrtimer *timer;
1571 timer = container_of(node, struct hrtimer, node);
1574 * The immediate goal for using the softexpires is
1575 * minimizing wakeups, not running timers at the
1576 * earliest interrupt after their soft expiration.
1577 * This allows us to avoid using a Priority Search
1578 * Tree, which can answer a stabbing query for
1579 * overlapping intervals and instead use the simple
1580 * BST we already have.
1581 * We don't add extra wakeups by delaying timers that
1582 * are right-of a not yet expired timer, because that
1583 * timer will have to trigger a wakeup anyway.
1585 if (basenow < hrtimer_get_softexpires_tv64(timer))
1588 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1589 if (active_mask == HRTIMER_ACTIVE_SOFT)
1590 hrtimer_sync_wait_running(cpu_base, flags);
1595 static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1597 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1598 unsigned long flags;
1601 hrtimer_cpu_base_lock_expiry(cpu_base);
1602 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1604 now = hrtimer_update_base(cpu_base);
1605 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
1607 cpu_base->softirq_activated = 0;
1608 hrtimer_update_softirq_timer(cpu_base, true);
1610 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1611 hrtimer_cpu_base_unlock_expiry(cpu_base);
1614 #ifdef CONFIG_HIGH_RES_TIMERS
1617 * High resolution timer interrupt
1618 * Called with interrupts disabled
1620 void hrtimer_interrupt(struct clock_event_device *dev)
1622 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1623 ktime_t expires_next, now, entry_time, delta;
1624 unsigned long flags;
1627 BUG_ON(!cpu_base->hres_active);
1628 cpu_base->nr_events++;
1629 dev->next_event = KTIME_MAX;
1631 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1632 entry_time = now = hrtimer_update_base(cpu_base);
1634 cpu_base->in_hrtirq = 1;
1636 * We set expires_next to KTIME_MAX here with cpu_base->lock
1637 * held to prevent that a timer is enqueued in our queue via
1638 * the migration code. This does not affect enqueueing of
1639 * timers which run their callback and need to be requeued on
1642 cpu_base->expires_next = KTIME_MAX;
1644 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1645 cpu_base->softirq_expires_next = KTIME_MAX;
1646 cpu_base->softirq_activated = 1;
1647 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1650 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1652 /* Reevaluate the clock bases for the next expiry */
1653 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1655 * Store the new expiry value so the migration code can verify
1658 cpu_base->expires_next = expires_next;
1659 cpu_base->in_hrtirq = 0;
1660 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1662 /* Reprogramming necessary ? */
1663 if (!tick_program_event(expires_next, 0)) {
1664 cpu_base->hang_detected = 0;
1669 * The next timer was already expired due to:
1671 * - long lasting callbacks
1672 * - being scheduled away when running in a VM
1674 * We need to prevent that we loop forever in the hrtimer
1675 * interrupt routine. We give it 3 attempts to avoid
1676 * overreacting on some spurious event.
1678 * Acquire base lock for updating the offsets and retrieving
1681 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1682 now = hrtimer_update_base(cpu_base);
1683 cpu_base->nr_retries++;
1687 * Give the system a chance to do something other than looping
1688 * here. We stored the entry time, so we know exactly how long
1689 * we spent here. We schedule the next event this amount of
1692 cpu_base->nr_hangs++;
1693 cpu_base->hang_detected = 1;
1694 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1696 delta = ktime_sub(now, entry_time);
1697 if ((unsigned int)delta > cpu_base->max_hang_time)
1698 cpu_base->max_hang_time = (unsigned int) delta;
1700 * Limit it to a sensible value as we enforce a longer
1701 * delay. Give the CPU at least 100ms to catch up.
1703 if (delta > 100 * NSEC_PER_MSEC)
1704 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1706 expires_next = ktime_add(now, delta);
1707 tick_program_event(expires_next, 1);
1708 pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
1711 /* called with interrupts disabled */
1712 static inline void __hrtimer_peek_ahead_timers(void)
1714 struct tick_device *td;
1716 if (!hrtimer_hres_active())
1719 td = this_cpu_ptr(&tick_cpu_device);
1720 if (td && td->evtdev)
1721 hrtimer_interrupt(td->evtdev);
1724 #else /* CONFIG_HIGH_RES_TIMERS */
1726 static inline void __hrtimer_peek_ahead_timers(void) { }
1728 #endif /* !CONFIG_HIGH_RES_TIMERS */
1731 * Called from run_local_timers in hardirq context every jiffy
1733 void hrtimer_run_queues(void)
1735 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1736 unsigned long flags;
1739 if (__hrtimer_hres_active(cpu_base))
1743 * This _is_ ugly: We have to check periodically whether we
1744 * can switch to highres and / or nohz mode. The clocksource
1745 * switch happens with xtime_lock held. Notification from
1746 * there only sets the check bit in the tick_oneshot code,
1747 * otherwise we might deadlock vs. xtime_lock.
1749 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1750 hrtimer_switch_to_hres();
1754 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1755 now = hrtimer_update_base(cpu_base);
1757 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1758 cpu_base->softirq_expires_next = KTIME_MAX;
1759 cpu_base->softirq_activated = 1;
1760 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1763 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1764 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1768 * Sleep related functions:
1770 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1772 struct hrtimer_sleeper *t =
1773 container_of(timer, struct hrtimer_sleeper, timer);
1774 struct task_struct *task = t->task;
1778 wake_up_process(task);
1780 return HRTIMER_NORESTART;
1784 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
1785 * @sl: sleeper to be started
1786 * @mode: timer mode abs/rel
1788 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
1789 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
1791 void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1792 enum hrtimer_mode mode)
1795 * Make the enqueue delivery mode check work on RT. If the sleeper
1796 * was initialized for hard interrupt delivery, force the mode bit.
1797 * This is a special case for hrtimer_sleepers because
1798 * hrtimer_init_sleeper() determines the delivery mode on RT so the
1799 * fiddling with this decision is avoided at the call sites.
1801 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1802 mode |= HRTIMER_MODE_HARD;
1804 hrtimer_start_expires(&sl->timer, mode);
1806 EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1808 static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1809 clockid_t clock_id, enum hrtimer_mode mode)
1812 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
1813 * marked for hard interrupt expiry mode are moved into soft
1814 * interrupt context either for latency reasons or because the
1815 * hrtimer callback takes regular spinlocks or invokes other
1816 * functions which are not suitable for hard interrupt context on
1819 * The hrtimer_sleeper callback is RT compatible in hard interrupt
1820 * context, but there is a latency concern: Untrusted userspace can
1821 * spawn many threads which arm timers for the same expiry time on
1822 * the same CPU. That causes a latency spike due to the wakeup of
1823 * a gazillion threads.
1825 * OTOH, privileged real-time user space applications rely on the
1826 * low latency of hard interrupt wakeups. If the current task is in
1827 * a real-time scheduling class, mark the mode for hard interrupt
1830 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1831 if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1832 mode |= HRTIMER_MODE_HARD;
1835 __hrtimer_init(&sl->timer, clock_id, mode);
1836 sl->timer.function = hrtimer_wakeup;
1841 * hrtimer_init_sleeper - initialize sleeper to the given clock
1842 * @sl: sleeper to be initialized
1843 * @clock_id: the clock to be used
1844 * @mode: timer mode abs/rel
1846 void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1847 enum hrtimer_mode mode)
1849 debug_init(&sl->timer, clock_id, mode);
1850 __hrtimer_init_sleeper(sl, clock_id, mode);
1853 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1855 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1857 switch(restart->nanosleep.type) {
1858 #ifdef CONFIG_COMPAT_32BIT_TIME
1860 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
1865 if (put_timespec64(ts, restart->nanosleep.rmtp))
1871 return -ERESTART_RESTARTBLOCK;
1874 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1876 struct restart_block *restart;
1879 set_current_state(TASK_INTERRUPTIBLE);
1880 hrtimer_sleeper_start_expires(t, mode);
1882 if (likely(t->task))
1883 freezable_schedule();
1885 hrtimer_cancel(&t->timer);
1886 mode = HRTIMER_MODE_ABS;
1888 } while (t->task && !signal_pending(current));
1890 __set_current_state(TASK_RUNNING);
1895 restart = &current->restart_block;
1896 if (restart->nanosleep.type != TT_NONE) {
1897 ktime_t rem = hrtimer_expires_remaining(&t->timer);
1898 struct timespec64 rmt;
1902 rmt = ktime_to_timespec64(rem);
1904 return nanosleep_copyout(restart, &rmt);
1906 return -ERESTART_RESTARTBLOCK;
1909 static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1911 struct hrtimer_sleeper t;
1914 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1916 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1917 ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1918 destroy_hrtimer_on_stack(&t.timer);
1922 long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
1923 const clockid_t clockid)
1925 struct restart_block *restart;
1926 struct hrtimer_sleeper t;
1930 slack = current->timer_slack_ns;
1931 if (dl_task(current) || rt_task(current))
1934 hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1935 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
1936 ret = do_nanosleep(&t, mode);
1937 if (ret != -ERESTART_RESTARTBLOCK)
1940 /* Absolute timers do not update the rmtp value and restart: */
1941 if (mode == HRTIMER_MODE_ABS) {
1942 ret = -ERESTARTNOHAND;
1946 restart = &current->restart_block;
1947 restart->fn = hrtimer_nanosleep_restart;
1948 restart->nanosleep.clockid = t.timer.base->clockid;
1949 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1951 destroy_hrtimer_on_stack(&t.timer);
1957 SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
1958 struct __kernel_timespec __user *, rmtp)
1960 struct timespec64 tu;
1962 if (get_timespec64(&tu, rqtp))
1965 if (!timespec64_valid(&tu))
1968 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1969 current->restart_block.nanosleep.rmtp = rmtp;
1970 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1976 #ifdef CONFIG_COMPAT_32BIT_TIME
1978 SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
1979 struct old_timespec32 __user *, rmtp)
1981 struct timespec64 tu;
1983 if (get_old_timespec32(&tu, rqtp))
1986 if (!timespec64_valid(&tu))
1989 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1990 current->restart_block.nanosleep.compat_rmtp = rmtp;
1991 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1997 * Functions related to boot-time initialization:
1999 int hrtimers_prepare_cpu(unsigned int cpu)
2001 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
2004 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2005 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
2007 clock_b->cpu_base = cpu_base;
2008 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
2009 timerqueue_init_head(&clock_b->active);
2012 cpu_base->cpu = cpu;
2013 cpu_base->active_bases = 0;
2014 cpu_base->hres_active = 0;
2015 cpu_base->hang_detected = 0;
2016 cpu_base->next_timer = NULL;
2017 cpu_base->softirq_next_timer = NULL;
2018 cpu_base->expires_next = KTIME_MAX;
2019 cpu_base->softirq_expires_next = KTIME_MAX;
2020 hrtimer_cpu_base_init_expiry_lock(cpu_base);
2024 #ifdef CONFIG_HOTPLUG_CPU
2026 static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
2027 struct hrtimer_clock_base *new_base)
2029 struct hrtimer *timer;
2030 struct timerqueue_node *node;
2032 while ((node = timerqueue_getnext(&old_base->active))) {
2033 timer = container_of(node, struct hrtimer, node);
2034 BUG_ON(hrtimer_callback_running(timer));
2035 debug_deactivate(timer);
2038 * Mark it as ENQUEUED not INACTIVE otherwise the
2039 * timer could be seen as !active and just vanish away
2040 * under us on another CPU
2042 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2043 timer->base = new_base;
2045 * Enqueue the timers on the new cpu. This does not
2046 * reprogram the event device in case the timer
2047 * expires before the earliest on this CPU, but we run
2048 * hrtimer_interrupt after we migrated everything to
2049 * sort out already expired timers and reprogram the
2052 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2056 int hrtimers_dead_cpu(unsigned int scpu)
2058 struct hrtimer_cpu_base *old_base, *new_base;
2061 BUG_ON(cpu_online(scpu));
2062 tick_cancel_sched_timer(scpu);
2065 * this BH disable ensures that raise_softirq_irqoff() does
2066 * not wake up ksoftirqd (and acquire the pi-lock) while
2067 * holding the cpu_base lock
2070 local_irq_disable();
2071 old_base = &per_cpu(hrtimer_bases, scpu);
2072 new_base = this_cpu_ptr(&hrtimer_bases);
2074 * The caller is globally serialized and nobody else
2075 * takes two locks at once, so deadlock is not possible.
2077 raw_spin_lock(&new_base->lock);
2078 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2080 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2081 migrate_hrtimer_list(&old_base->clock_base[i],
2082 &new_base->clock_base[i]);
2086 * The migration might have changed the first expiring softirq
2087 * timer on this CPU. Update it.
2089 hrtimer_update_softirq_timer(new_base, false);
2091 raw_spin_unlock(&old_base->lock);
2092 raw_spin_unlock(&new_base->lock);
2094 /* Check, if we got expired work to do */
2095 __hrtimer_peek_ahead_timers();
2101 #endif /* CONFIG_HOTPLUG_CPU */
2103 void __init hrtimers_init(void)
2105 hrtimers_prepare_cpu(smp_processor_id());
2106 open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
2110 * schedule_hrtimeout_range_clock - sleep until timeout
2111 * @expires: timeout value (ktime_t)
2112 * @delta: slack in expires timeout (ktime_t)
2114 * @clock_id: timer clock to be used
2117 schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2118 const enum hrtimer_mode mode, clockid_t clock_id)
2120 struct hrtimer_sleeper t;
2123 * Optimize when a zero timeout value is given. It does not
2124 * matter whether this is an absolute or a relative time.
2126 if (expires && *expires == 0) {
2127 __set_current_state(TASK_RUNNING);
2132 * A NULL parameter means "infinite"
2139 hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2140 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2141 hrtimer_sleeper_start_expires(&t, mode);
2146 hrtimer_cancel(&t.timer);
2147 destroy_hrtimer_on_stack(&t.timer);
2149 __set_current_state(TASK_RUNNING);
2151 return !t.task ? 0 : -EINTR;
2155 * schedule_hrtimeout_range - sleep until timeout
2156 * @expires: timeout value (ktime_t)
2157 * @delta: slack in expires timeout (ktime_t)
2160 * Make the current task sleep until the given expiry time has
2161 * elapsed. The routine will return immediately unless
2162 * the current task state has been set (see set_current_state()).
2164 * The @delta argument gives the kernel the freedom to schedule the
2165 * actual wakeup to a time that is both power and performance friendly.
2166 * The kernel gives its normal best effort behavior for "@expires+@delta",
2167 * and may decide to fire the timer earlier, but no earlier than @expires.
2169 * You can set the task state as follows -
2171 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2172 * pass before the routine returns unless the current task is explicitly
2173 * woken up, (e.g. by wake_up_process()).
2175 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2176 * delivered to the current task or the current task is explicitly woken
2179 * The current task state is guaranteed to be TASK_RUNNING when this
2182 * Returns 0 when the timer has expired. If the task was woken before the
2183 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2184 * by an explicit wakeup, it returns -EINTR.
2186 int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
2187 const enum hrtimer_mode mode)
2189 return schedule_hrtimeout_range_clock(expires, delta, mode,
2192 EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
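/*
 * Usage sketch (illustrative only): sleep for roughly 10ms, allowing
 * 1ms of slack, and return early if a signal arrives. Setting the task
 * state before the call is mandatory, as described above.
 */
static int example_short_sleep(void)
{
	ktime_t to = ms_to_ktime(10);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_REL);
}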
2195 * schedule_hrtimeout - sleep until timeout
2196 * @expires: timeout value (ktime_t)
2199 * Make the current task sleep until the given expiry time has
2200 * elapsed. The routine will return immediately unless
2201 * the current task state has been set (see set_current_state()).
2203 * You can set the task state as follows -
2205 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2206 * pass before the routine returns unless the current task is explicitly
2207 * woken up, (e.g. by wake_up_process()).
2209 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2210 * delivered to the current task or the current task is explicitly woken
2213 * The current task state is guaranteed to be TASK_RUNNING when this
2216 * Returns 0 when the timer has expired. If the task was woken before the
2217 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2218 * by an explicit wakeup, it returns -EINTR.
2220 int __sched schedule_hrtimeout(ktime_t *expires,
2221 const enum hrtimer_mode mode)
2223 return schedule_hrtimeout_range(expires, 0, mode);
2225 EXPORT_SYMBOL_GPL(schedule_hrtimeout);