1 // SPDX-License-Identifier: GPL-2.0-only
3 * Infrastructure for migratable timers
5 * Copyright(C) 2022 linutronix GmbH
7 #include <linux/cpuhotplug.h>
8 #include <linux/slab.h>
10 #include <linux/spinlock.h>
11 #include <linux/timerqueue.h>
12 #include <trace/events/ipi.h>
14 #include "timer_migration.h"
15 #include "tick-internal.h"
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/timer_migration.h>
21 * The timer migration mechanism is built on a hierarchy of groups. The
22 * lowest level group contains CPUs, the next level groups of CPU groups
23 * and so forth. The CPU groups are kept per node so for the normal case
24 * lock contention won't happen across nodes. Depending on the number of
25 * CPUs per node even the next level might be kept as groups of CPU groups
26 * per node and only the levels above cross the node topology.
28 * Example topology for a two node system with 24 CPUs each.
29 *
30 * LVL 2                          [GRP2:0]
31 *                            GRP1:0 - GRP1:1
32 *                           /              \
33 * LVL 1           [GRP1:0]                  [GRP1:1]
34 *              GRP0:0 - GRP0:2           GRP0:3 - GRP0:5
35 *               /       |      \          /       |      \
36 * LVL 0  [GRP0:0] [GRP0:1] [GRP0:2] [GRP0:3] [GRP0:4] [GRP0:5]
37 * CPUS     0-7      8-15     16-23    24-31    32-39    40-47
39 * The groups hold a timer queue of events sorted by expiry time. These
40 * queues are updated when CPUs go idle. When they come out of idle, the
41 * ignore flag of their events is set.
43 * Each group has a designated migrator CPU/group as long as a CPU/group is
44 * active in the group. This designated role is necessary to prevent all
45 * active CPUs in a group from trying to migrate expired timers from other
46 * CPUs, which would result in massive lock bouncing.
48 * When a CPU is awake, it checks in its own timer tick the group
49 * hierarchy up to the point where it is assigned the migrator role or, if
50 * no CPU is active, it also checks the groups where no migrator is set
51 * (TMIGR_NONE).
53 * If it finds expired timers in one of the group queues it pulls them over
54 * from the idle CPU and runs the timer function. After that it updates the
55 * group and the parent groups if required.
57 * CPUs which go idle arm their CPU local timer hardware for the next local
58 * (pinned) timer event. If the next migratable timer expires after the
59 * next local timer or the CPU has no migratable timer pending then the
60 * CPU does not queue an event in the LVL0 group. If the next migratable
61 * timer expires before the next local timer then the CPU queues that timer
62 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
63 * group.
65 * When a CPU comes out of idle and when a group has at least a single active
66 * child, the ignore flag of the tmigr_event is set. This indicates that
67 * the event is ignored even if it is still enqueued in the parent group's
68 * timer queue. It will be removed when touching the timer queue the next
69 * time. This spares locking in the active path as the lock protects (after
70 * setup) only event information. For more information about locking,
71 * please read the section "Locking rules".
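 *
 * A condensed, illustrative sketch of that lazy removal, as done by
 * tmigr_next_groupevt() further below while holding the group lock:
 *
 *	while ((node = timerqueue_getnext(&group->events))) {
 *		evt = container_of(node, struct tmigr_event, nextevt);
 *		if (!evt->ignore)
 *			return evt;
 *		// ignored events are only deleted here, under group->lock
 *		timerqueue_del(&group->events, node);
 *	}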
73 * If the CPU is the migrator of the group then it delegates that role to
74 * the next active CPU in the group or sets migrator to TMIGR_NONE when
75 * there is no active CPU in the group. This delegation needs to be
76 * propagated up the hierarchy so hand over from other leaves can happen at
77 * all hierarchy levels w/o doing a search.
79 * When the last CPU in the system goes idle, then it drops all migrator
80 * duties up to the top level of the hierarchy (LVL2 in the example). It
81 * then has to make sure that it arms its own local hardware timer for
82 * the earliest event in the system.
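 *
 * A rough, illustrative sketch of how this idle/active cycle maps onto the
 * interfaces implemented below (the real call sites live in the timer and
 * tick code):
 *
 *	// going idle:
 *	firstexp = tmigr_cpu_deactivate(next_global_timer);
 *	// ... arm the local timer hardware for min(next_local_timer, firstexp)
 *
 *	// while active (simplified; the check decides whether the timer
 *	// softirq needs to expire remote timers):
 *	if (tmigr_requires_handle_remote())
 *		tmigr_handle_remote();
 *
 *	// coming out of idle:
 *	tmigr_cpu_activate();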
88 * The groups are built up at init time or when CPUs come online. They are
89 * not destroyed when a group becomes empty due to offlining. The group
90 * just won't participate in the hierarchy management anymore. Destroying
91 * groups would result in interesting race conditions which would just make
92 * the whole mechanism slow and complex.
95 * Locking rules:
96 * --------------
97 *
98 * For setting up new groups and handling events it's required to lock both
99 * child and parent group. The lock ordering is always bottom up. This also
100 * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and
101 * active CPU/group information atomic_try_cmpxchg() is used instead and only
102 * the per CPU tmigr_cpu->lock is held.
104 * During the setup of groups tmigr_level_list is required. It is protected by
105 * tmigr_mutex.
107 * When @timer_base->lock as well as tmigr related locks are required, the lock
108 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
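 *
 * A condensed sketch of the bottom up locking, as used by
 * tmigr_update_events() below (illustrative only):
 *
 *	raw_spin_lock(&child->lock);
 *	raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
 *	// ... update the group event and the group's timerqueue ...
 *	raw_spin_unlock(&group->lock);
 *	raw_spin_unlock(&child->lock);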
111 * Protection of the tmigr group state information:
112 * ------------------------------------------------
114 * The state information with the list of active children and migrator needs to
115 * be protected by a sequence counter. It prevents a race when updates in child
116 * groups are propagated in a different order than they were made. The state
117 * update is performed lockless and group wise. The following scenario describes
118 * what happens without updating the sequence counter:
120 * Therefore, let's take three groups and four CPUs (CPU2 and CPU3 as well
121 * as GRP0:1 will not change during the scenario):
125 * active = GRP0:0, GRP0:1
127 * LVL 0 [GRP0:0] [GRP0:1]
128 * migrator = CPU0 migrator = CPU2
129 * active = CPU0 active = CPU2
132 * active idle active idle
135 * 1. CPU0 goes idle. As the update is performed group wise, in the first step
136 * only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to
137 * walk the hierarchy.
141 * active = GRP0:0, GRP0:1
143 * LVL 0 [GRP0:0] [GRP0:1]
144 * --> migrator = TMIGR_NONE migrator = CPU2
145 * --> active = active = CPU2
148 * --> idle idle active idle
150 * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of
151 * idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also
152 * has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the
153 * hierarchy to perform the needed update from their point of view. The
154 * currently visible state looks the following:
158 * active = GRP0:0, GRP0:1
160 * LVL 0 [GRP0:0] [GRP0:1]
161 * --> migrator = CPU1 migrator = CPU2
162 * --> active = CPU1 active = CPU2
165 * idle --> active active idle
167 * 3. Here is the race condition: CPU1 managed to propagate its changes (from
168 * step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The
169 * active members of GRP1:0 remain unchanged after the update since it is
170 * still valid from CPU1's current point of view:
173 * --> migrator = GRP0:1
174 * --> active = GRP0:0, GRP0:1
176 * LVL 0 [GRP0:0] [GRP0:1]
177 * migrator = CPU1 migrator = CPU2
178 * active = CPU1 active = CPU2
181 * idle active active idle
183 * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0.
186 * --> migrator = GRP0:1
187 * --> active = GRP0:1
189 * LVL 0 [GRP0:0] [GRP0:1]
190 * migrator = CPU1 migrator = CPU2
191 * active = CPU1 active = CPU2
194 * idle active active idle
197 * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is
198 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not
199 * have GRP0:0 listed as active, which is wrong. The sequence counter has been
200 * added to avoid inconsistent states during updates. The state is updated
201 * atomically only if all members, including the sequence counter, match the
202 * expected value (compare-and-exchange).
204 * Looking back at the previous example with the addition of the sequence
205 * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed
206 * the sequence number during the update in step 3 so the expected old value (as
207 * seen by CPU0 before starting the walk) does not match.
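 *
 * A minimal sketch of the resulting lockless update pattern (cf.
 * tmigr_active_up() and tmigr_inactive_up() below); the sequence counter
 * increment makes the compare-and-exchange of a CPU which is working on
 * stale data fail, so that CPU has to redo its update with fresh state:
 *
 *	curstate.state = atomic_read(&group->migr_state);
 *	do {
 *		newstate = curstate;
 *		newstate.active |= childmask;	// or clear it on the idle path
 *		newstate.seq++;
 *	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state,
 *				     newstate.state));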
209 * Prevent race between new event and last CPU going inactive
210 * ----------------------------------------------------------
212 * When the last CPU is going idle and there is a concurrent update of a new
213 * first global timer of an idle CPU, the group and child states have to be read
214 * while holding the lock in tmigr_update_events(). The following scenario shows
215 * what happens when this is not done.
217 * 1. Only CPU2 is active:
222 * next_expiry = KTIME_MAX
224 * LVL 0 [GRP0:0] [GRP0:1]
225 * migrator = TMIGR_NONE migrator = CPU2
226 * active = active = CPU2
227 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
230 * idle idle active idle
232 * 2. Now CPU2 goes idle (and has no global timer that has to be handled) and
233 * propagates that to GRP0:1:
238 * next_expiry = KTIME_MAX
240 * LVL 0 [GRP0:0] [GRP0:1]
241 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE
242 * active = --> active =
243 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
246 * idle idle --> idle idle
248 * 3. Now the idle state is propagated up to GRP1:0. As this is now the last
249 * child going idle in the top level group, the expiry of the next group event
250 * has to be handed back to make sure no event is lost. As there is no event
251 * enqueued, KTIME_MAX is handed back to CPU2.
254 * --> migrator = TMIGR_NONE
256 * next_expiry = KTIME_MAX
258 * LVL 0 [GRP0:0] [GRP0:1]
259 * migrator = TMIGR_NONE migrator = TMIGR_NONE
261 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
264 * idle idle --> idle idle
266 * 4. CPU0 has a new timer queued from idle and it expires at TIMER0. CPU0
267 * propagates that to GRP0:0:
270 * migrator = TMIGR_NONE
272 * next_expiry = KTIME_MAX
274 * LVL 0 [GRP0:0] [GRP0:1]
275 * migrator = TMIGR_NONE migrator = TMIGR_NONE
277 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX
280 * idle idle idle idle
282 * 5. GRP0:0 is not active, so the new timer has to be propagated to
283 * GRP1:0. Therefore the GRP1:0 state has to be read. When the stale value
284 * (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is
285 * handed back to CPU0, as it seems that there is still an active child in
286 * GRP1:0:
289 * migrator = TMIGR_NONE
291 * --> next_expiry = TIMER0
293 * LVL 0 [GRP0:0] [GRP0:1]
294 * migrator = TMIGR_NONE migrator = TMIGR_NONE
296 * next_expiry = TIMER0 next_expiry = KTIME_MAX
299 * idle idle idle idle
301 * This is prevented by reading the state when holding the lock (when a new
302 * timer has to be propagated from the idle path)::
304 *	CPU2 (tmigr_inactive_up())            CPU0 (tmigr_new_timer_up())
305 *	--------------------------            ---------------------------
306 *
307 *	cmpxchg(&GRP1:0->state);
308 *	tmigr_update_events() {
309 *	    spin_lock(&GRP1:0->lock);
310 *	    // ... update events ...
311 *	    // hand back first expiry when GRP1:0 is idle
312 *	    spin_unlock(&GRP1:0->lock);
313 *	    // ^^^ release state modification
314 *	}
315 *	                                      tmigr_update_events() {
316 *	                                          spin_lock(&GRP1:0->lock)
317 *	                                          // ^^^ acquire state modification
318 *	                                          group_state = atomic_read(&GRP1:0->state)
319 *	                                          // .... update events ...
320 *	                                          // hand back first expiry when GRP1:0 is idle
321 *	                                          spin_unlock(&GRP1:0->lock) <3>
322 *	                                          // ^^^ makes state visible for other
323 *	                                          // callers of tmigr_new_timer_up()
324 *	                                      }
326 * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported
327 * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent
328 * update of the group state from the active path is no problem, as the
329 * upcoming CPU will take care of the group events.
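 *
 * Condensed from tmigr_update_events() below (illustrative only): on the
 * new timer path the group state is (re)read only after the group lock has
 * been acquired, so the decision whether the first expiry has to be handed
 * back cannot be based on a stale value:
 *
 *	raw_spin_lock(&group->lock);
 *	groupstate.state = atomic_read(&group->migr_state);
 *	// ... enqueue the event; hand back data->firstexp only if this is
 *	// the top level group and groupstate.migrator == TMIGR_NONE ...
 *	raw_spin_unlock(&group->lock);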
331 * Required event and timerqueue update after a remote expiry:
332 * -----------------------------------------------------------
334 * After expiring timers of a remote CPU, a walk through the hierarchy and
335 * update of events and timerqueues is required. It is obviously needed if there
336 * is a 'new' global timer but also if there is no new global timer but the
337 * remote CPU is still idle.
339 * 1. CPU0 and CPU1 are idle and both have a global timer expiring at the same
340 * time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is
341 * also idle and has no global timer pending. CPU2 is the only active CPU and
342 * thus also the migrator:
347 * --> timerqueue = evt-GRP0:0
349 * LVL 0 [GRP0:0] [GRP0:1]
350 * migrator = TMIGR_NONE migrator = CPU2
351 * active = active = CPU2
352 * groupevt.ignore = false groupevt.ignore = true
353 * groupevt.cpu = CPU0 groupevt.cpu =
354 * timerqueue = evt-CPU0, timerqueue =
358 * idle idle active idle
360 * 2. CPU2 starts to expire remote timers. It starts with LVL0 group
361 * GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with
362 * the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It
363 * looks at tmigr_event::cpu struct member and expires the pending timer(s)
364 * of CPU0.
371 * LVL 0 [GRP0:0] [GRP0:1]
372 * migrator = TMIGR_NONE migrator = CPU2
373 * active = active = CPU2
374 * groupevt.ignore = false groupevt.ignore = true
375 * --> groupevt.cpu = CPU0 groupevt.cpu =
376 * timerqueue = evt-CPU0, timerqueue =
380 * idle idle active idle
382 * 3. Some work has to be done after expiring the timers of CPU0. If we stop
383 * here, then CPU1's pending global timer(s) will not expire in time and the
384 * timerqueue of GRP0:0 still has an event for CPU0 enqueued which has just
385 * been processed. So it is required to walk the hierarchy from CPU0's point
386 * of view and update it accordingly. CPU0's event will be removed from the
387 * timerqueue because it has no pending timer. If CPU0 had a timer
388 * pending then it would have to expire after CPU1's first timer because all
389 * timers from this period were just expired. Either way CPU1's event will be
390 * first in GRP0:0's timerqueue and therefore set in the CPU field of the group
391 * event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not
392 * active:
397 * --> timerqueue = evt-GRP0:0
399 * LVL 0 [GRP0:0] [GRP0:1]
400 * migrator = TMIGR_NONE migrator = CPU2
401 * active = active = CPU2
402 * groupevt.ignore = false groupevt.ignore = true
403 * --> groupevt.cpu = CPU1 groupevt.cpu =
404 * --> timerqueue = evt-CPU1 timerqueue =
407 * idle idle active idle
409 * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the
410 * pending timer(s) of CPU1.
412 * The hierarchy walk in step 3 can be skipped if the migrator notices that a
413 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care
414 * of the group as migrator and any needed updates within the hierarchy.
417 static DEFINE_MUTEX(tmigr_mutex);
418 static struct list_head *tmigr_level_list __read_mostly;
420 static unsigned int tmigr_hierarchy_levels __read_mostly;
421 static unsigned int tmigr_crossnode_level __read_mostly;
423 static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
425 #define TMIGR_NONE 0xFF
428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
430 return !(tmc->tmgroup && tmc->online);
434 * Returns true when @childmask corresponds to the group migrator or when the
435 * group is not active - so no migrator is set.
437 static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask)
441 s.state = atomic_read(&group->migr_state);
443 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
449 static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask)
451 bool lonely, migrator = false;
452 unsigned long active;
455 s.state = atomic_read(&group->migr_state);
457 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
461 lonely = bitmap_weight(&active, BIT_CNT) <= 1;
463 return (migrator && lonely);
466 static bool tmigr_check_lonely(struct tmigr_group *group)
468 unsigned long active;
471 s.state = atomic_read(&group->migr_state);
475 return bitmap_weight(&active, BIT_CNT) <= 1;
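
/*
 * Illustrative example of the childmask encoding used above and below: each
 * child of a group gets childmask = BIT(n) when it is connected as the n-th
 * child (see tmigr_connect_child_parent() and tmigr_setup_groups()). A group
 * state with active == 0x05 therefore means that children 0 and 2 are
 * active, migrator == 0x04 means that child 2 holds the migrator role, and
 * migrator == TMIGR_NONE (0xFF) means that the group has no active child at
 * all.
 */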
478 typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *);
480 static void __walk_groups(up_f up, void *data,
481 struct tmigr_cpu *tmc)
483 struct tmigr_group *child = NULL, *group = tmc->tmgroup;
486 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
488 if (up(group, child, data))
492 group = group->parent;
496 static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
498 lockdep_assert_held(&tmc->lock);
500 __walk_groups(up, data, tmc);
504 * struct tmigr_walk - data required for walking the hierarchy
505 * @nextexp: Next CPU event expiry information which is handed into
506 * the timer migration code by the timer code
507 * (get_next_timer_interrupt())
508 * @firstexp: Contains the first event expiry information when the
509 * last active CPU of the hierarchy is on the way to idle to
510 * make sure the CPU will be back in time.
511 * @evt: Pointer to tmigr_event which needs to be queued (of the idle
512 * child group)
513 * @childmask: childmask of the child group
514 * @remote: Is set when the new timer path is executed in
515 * tmigr_handle_remote_cpu()
520 struct tmigr_event *evt;
526 * struct tmigr_remote_data - data required for remote expiry hierarchy walk
527 * @basej: timer base in jiffies
528 * @now: timer base monotonic
529 * @firstexp: returns expiry of the first timer in the idle timer
530 * migration hierarchy to make sure the timer is handled in
531 * time; it is stored in the per CPU tmigr_cpu struct of the
532 * CPU which expires remote timers
533 * @childmask: childmask of the child group
534 * @check: is set if there is a need to handle remote timers;
535 * required in tmigr_requires_handle_remote() only
536 * @tmc_active: this flag indicates whether the CPU which triggers
537 * the hierarchy walk is !idle in the timer migration
538 * hierarchy. When the CPU is idle and the whole hierarchy is
539 * idle, only the first event of the top level has to be
540 * considered.
542 struct tmigr_remote_data {
552 * Returns the next event of the timerqueue @group->events
554 * Removes timers with ignore flag and updates next_expiry of the group. Values
555 * of the group event are updated in tmigr_update_events() only.
557 static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
559 struct timerqueue_node *node = NULL;
560 struct tmigr_event *evt = NULL;
562 lockdep_assert_held(&group->lock);
564 WRITE_ONCE(group->next_expiry, KTIME_MAX);
566 while ((node = timerqueue_getnext(&group->events))) {
567 evt = container_of(node, struct tmigr_event, nextevt);
570 WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
575 * Remove next timers with ignore flag, because the group lock
576 * is held anyway
578 if (!timerqueue_del(&group->events, node))
586 * Return the next event (with the expiry equal to or before @now)
587 *
588 * The event which is returned is also removed from the queue.
590 static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group,
593 struct tmigr_event *evt = tmigr_next_groupevt(group);
595 if (!evt || now < evt->nextevt.expires)
599 * The event is ready to expire. Remove it and update next group event.
601 timerqueue_del(&group->events, &evt->nextevt);
602 tmigr_next_groupevt(group);
607 static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
609 struct tmigr_event *evt;
611 evt = tmigr_next_groupevt(group);
616 return evt->nextevt.expires;
619 static bool tmigr_active_up(struct tmigr_group *group,
620 struct tmigr_group *child,
623 union tmigr_state curstate, newstate;
624 struct tmigr_walk *data = ptr;
628 childmask = data->childmask;
630 * No memory barrier is required here in contrast to
631 * tmigr_inactive_up(), as the group state change does not depend on the
632 * child state.
634 curstate.state = atomic_read(&group->migr_state);
640 if (newstate.migrator == TMIGR_NONE) {
641 newstate.migrator = childmask;
643 /* Changes need to be propagated */
647 newstate.active |= childmask;
650 } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));
652 if ((walk_done == false) && group->parent)
653 data->childmask = group->childmask;
656 * The group is active (again). The group event might still be queued
657 * into the parent group's timerqueue but can now be handled by the
658 * migrator of this group. Therefore the ignore flag for the group event
659 * is updated to reflect this.
661 * The update of the ignore flag in the active path is done lockless. In
662 * the worst case the migrator of the parent group observes the change too
663 * late and remotely expires all events belonging to this group. The
664 * lock is held while updating the ignore flag in the idle path. So this
665 * state change will not be lost.
667 group->groupevt.ignore = true;
669 trace_tmigr_group_set_cpu_active(group, newstate, childmask);
674 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
676 struct tmigr_walk data;
678 data.childmask = tmc->childmask;
680 trace_tmigr_cpu_active(tmc);
682 tmc->cpuevt.ignore = true;
683 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
685 walk_groups(&tmigr_active_up, &data, tmc);
689 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
691 * Call site timer_clear_idle() is called with interrupts disabled.
693 void tmigr_cpu_activate(void)
695 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
697 if (tmigr_is_not_available(tmc))
700 if (WARN_ON_ONCE(!tmc->idle))
703 raw_spin_lock(&tmc->lock);
705 __tmigr_cpu_activate(tmc);
706 raw_spin_unlock(&tmc->lock);
710 * Returns true if there is nothing to be propagated to the next level
711 *
712 * @data->firstexp is set to the expiry of the first global event of the (top
713 * level of the) hierarchy, but only when the hierarchy is completely idle.
715 * The child and group states need to be read under the lock, to prevent a race
716 * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See
717 * also section "Prevent race between new event and last CPU going inactive" in
718 * the documentation at the top.
720 * This is the only place where the group event expiry value is set.
723 bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
724 struct tmigr_walk *data)
726 struct tmigr_event *evt, *first_childevt;
727 union tmigr_state childstate, groupstate;
728 bool remote = data->remote;
729 bool walk_done = false;
733 raw_spin_lock(&child->lock);
734 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
736 childstate.state = atomic_read(&child->migr_state);
737 groupstate.state = atomic_read(&group->migr_state);
739 if (childstate.active) {
744 first_childevt = tmigr_next_groupevt(child);
745 nextexp = child->next_expiry;
746 evt = &child->groupevt;
748 evt->ignore = (nextexp == KTIME_MAX) ? true : false;
750 nextexp = data->nextexp;
752 first_childevt = evt = data->evt;
754 raw_spin_lock(&group->lock);
756 childstate.state = 0;
757 groupstate.state = atomic_read(&group->migr_state);
761 * If the child event is already queued in the group, remove it from the
762 * queue only when the expiry time has changed or when the event could be ignored.
764 if (timerqueue_node_queued(&evt->nextevt)) {
765 if ((evt->nextevt.expires == nextexp) && !evt->ignore)
768 if (!timerqueue_del(&group->events, &evt->nextevt))
769 WRITE_ONCE(group->next_expiry, KTIME_MAX);
774 * When the next child event could be ignored (nextexp is
775 * KTIME_MAX) and there was no remote timer handling before or
776 * the group is already active, there is no need to walk the
777 * hierarchy even if there is a parent group.
779 * The other way round: even if the event could be ignored, but
780 * if a remote timer handling was executed before and the group
781 * is not active, walking the hierarchy is required to not miss
782 * an enqueued timer in the non active group. The enqueued timer
783 * of the group needs to be propagated to a higher level to
784 * ensure it is handled.
786 if (!remote || groupstate.active)
789 evt->nextevt.expires = nextexp;
790 evt->cpu = first_childevt->cpu;
792 if (timerqueue_add(&group->events, &evt->nextevt))
793 WRITE_ONCE(group->next_expiry, nextexp);
797 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) {
801 * Nothing to do when the update was done during remote timer
802 * handling. The first timer of the top level group, which needs to be
803 * handled when the top level group is not active, is calculated
804 * directly in tmigr_handle_remote_up().
810 * The top level group is idle and it has to be ensured that the
811 * global timers are handled in time. (This could be optimized
812 * by keeping track of the last global scheduled event and only
813 * arming it on the CPU if the new event is earlier. Not sure if
814 * it's worth the complexity.)
816 data->firstexp = tmigr_next_groupevt_expires(group);
819 trace_tmigr_update_events(child, group, childstate, groupstate,
823 raw_spin_unlock(&group->lock);
826 raw_spin_unlock(&child->lock);
831 static bool tmigr_new_timer_up(struct tmigr_group *group,
832 struct tmigr_group *child,
835 struct tmigr_walk *data = ptr;
837 return tmigr_update_events(group, child, data);
841 * Returns the expiry of the next timer that needs to be handled. KTIME_MAX is
842 * returned if an active CPU will handle all the timer migration hierarchy
843 * events.
845 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
847 struct tmigr_walk data = { .nextexp = nextexp,
848 .firstexp = KTIME_MAX,
849 .evt = &tmc->cpuevt };
851 lockdep_assert_held(&tmc->lock);
856 trace_tmigr_cpu_new_timer(tmc);
858 tmc->cpuevt.ignore = false;
861 walk_groups(&tmigr_new_timer_up, &data, tmc);
863 /* If there is a new first global event, make sure it is handled */
864 return data.firstexp;
867 static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
870 struct timer_events tevt;
871 struct tmigr_walk data;
872 struct tmigr_cpu *tmc;
874 tmc = per_cpu_ptr(&tmigr_cpu, cpu);
876 raw_spin_lock_irq(&tmc->lock);
879 * If the remote CPU is offline then the timers have been migrated to
880 * another CPU.
882 * If tmigr_cpu::remote is set, another CPU is already expiring the
883 * timers of the remote CPU at the moment.
885 * If tmigr_event::ignore is set, then the CPU returns from idle and
886 * takes care of its timers.
888 * If the next event expires in the future, then the event has been
889 * updated and there are no timers to expire right now. The CPU which
890 * updated the event takes care of it when the hierarchy is completely
891 * idle. Otherwise the migrator does it as the event is enqueued.
893 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
894 now < tmc->cpuevt.nextevt.expires) {
895 raw_spin_unlock_irq(&tmc->lock);
899 trace_tmigr_handle_remote_cpu(tmc);
902 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
904 /* Drop the lock to allow the remote CPU to exit idle */
905 raw_spin_unlock_irq(&tmc->lock);
907 if (cpu != smp_processor_id())
908 timer_expire_remote(cpu);
911 * Lock ordering needs to be preserved - timer_base locks before tmigr
912 * related locks (see section "Locking rules" in the documentation at
913 * the top). During fetching the next timer interrupt, also tmc->lock
914 * needs to be held. Otherwise there is a possible race window against
915 * the CPU itself when it comes out of idle, updates the first timer in
916 * the hierarchy and goes back to idle.
918 * timer base locks are dropped as fast as possible: After checking
919 * whether the remote CPU went offline in the meantime and after
920 * fetching the next remote timer interrupt. Dropping the locks as fast
921 * as possible keeps the locking region small and prevents holding
922 * several (unnecessary) locks during walking the hierarchy for updating
923 * the timerqueue and group events.
926 timer_lock_remote_bases(cpu);
927 raw_spin_lock(&tmc->lock);
930 * When the CPU went offline in the meantime, no hierarchy walk has to
931 * be done for updating the queued events, because the walk was
932 * already done during marking the CPU offline in the hierarchy.
934 * When the CPU is no longer idle, the CPU takes care of the timers and
935 * also of the timers in the hierarchy.
937 * (See also section "Required event and timerqueue update after a
938 * remote expiry" in the documentation at the top)
940 if (!tmc->online || !tmc->idle) {
941 timer_unlock_remote_bases(cpu);
945 /* next event of CPU */
946 fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu);
947 timer_unlock_remote_bases(cpu);
949 data.nextexp = tevt.global;
950 data.firstexp = KTIME_MAX;
951 data.evt = &tmc->cpuevt;
955 * The update is done even when there is no 'new' global timer pending
956 * on the remote CPU (see section "Required event and timerqueue update
957 * after a remote expiry" in the documentation at the top)
959 walk_groups(&tmigr_new_timer_up, &data, tmc);
963 raw_spin_unlock_irq(&tmc->lock);
966 static bool tmigr_handle_remote_up(struct tmigr_group *group,
967 struct tmigr_group *child,
970 struct tmigr_remote_data *data = ptr;
971 struct tmigr_event *evt;
979 childmask = data->childmask;
981 trace_tmigr_handle_remote(group);
984 * Handle the group only if @childmask is the migrator or if the
985 * group has no migrator. Otherwise the group is active and is
986 * handled by its own migrator.
988 if (!tmigr_check_migrator(group, childmask))
991 raw_spin_lock_irq(&group->lock);
993 evt = tmigr_next_expired_groupevt(group, now);
996 unsigned int remote_cpu = evt->cpu;
998 raw_spin_unlock_irq(&group->lock);
1000 tmigr_handle_remote_cpu(remote_cpu, now, jif);
1002 /* check if there is another event that needs to be handled */
1007 * Update of childmask for the next level and keep track of the expiry
1008 * of the first event that needs to be handled (group->next_expiry was
1009 * updated by tmigr_next_expired_groupevt(), next was set by
1010 * tmigr_handle_remote_cpu()).
1012 data->childmask = group->childmask;
1013 data->firstexp = group->next_expiry;
1015 raw_spin_unlock_irq(&group->lock);
1021 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
1023 * Called from the timer soft interrupt with interrupts enabled.
1025 void tmigr_handle_remote(void)
1027 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1028 struct tmigr_remote_data data;
1030 if (tmigr_is_not_available(tmc))
1033 data.childmask = tmc->childmask;
1034 data.firstexp = KTIME_MAX;
1037 * NOTE: This is a duplicate check because the migrator test will be done
1038 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
1039 * return when nothing has to be done.
1041 if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask))
1044 data.now = get_jiffies_update(&data.basej);
1047 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
1048 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
1049 * handling, tmc->wakeup is fine to be stale as it is called in
1050 * interrupt context and tick_nohz_next_event() is executed in interrupt
1051 * exit path only after processing the last pending interrupt.
1054 __walk_groups(&tmigr_handle_remote_up, &data, tmc);
1056 raw_spin_lock_irq(&tmc->lock);
1057 WRITE_ONCE(tmc->wakeup, data.firstexp);
1058 raw_spin_unlock_irq(&tmc->lock);
1061 static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
1062 struct tmigr_group *child,
1065 struct tmigr_remote_data *data = ptr;
1068 childmask = data->childmask;
1071 * Handle the group only if the child is the migrator or if the group
1072 * has no migrator. Otherwise the group is active and is handled by its
1073 * own migrator.
1075 if (!tmigr_check_migrator(group, childmask))
1079 * When there is a parent group and the CPU which triggered the
1080 * hierarchy walk is not active, proceed the walk to reach the top level
1081 * group before reading the next_expiry value.
1083 if (group->parent && !data->tmc_active)
1087 * The lock is required on 32bit architectures to read the variable
1088 * consistently with a concurrent writer. On 64bit the lock is not
1089 * required because the read operation is not split and so it is always
1090 * consistent.
1092 if (IS_ENABLED(CONFIG_64BIT)) {
1093 data->firstexp = READ_ONCE(group->next_expiry);
1094 if (data->now >= data->firstexp) {
1099 raw_spin_lock(&group->lock);
1100 data->firstexp = group->next_expiry;
1101 if (data->now >= group->next_expiry) {
1103 raw_spin_unlock(&group->lock);
1106 raw_spin_unlock(&group->lock);
1110 /* Update of childmask for the next level */
1111 data->childmask = group->childmask;
1116 * tmigr_requires_handle_remote() - Check the need of remote timer handling
1118 * Must be called with interrupts disabled.
1120 bool tmigr_requires_handle_remote(void)
1122 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1123 struct tmigr_remote_data data;
1127 if (tmigr_is_not_available(tmc))
1130 data.now = get_jiffies_update(&jif);
1131 data.childmask = tmc->childmask;
1132 data.firstexp = KTIME_MAX;
1133 data.tmc_active = !tmc->idle;
1137 * If the CPU is active, walk the hierarchy to check whether a remote
1138 * expiry is required.
1140 * Check is done lockless as interrupts are disabled and @tmc->idle is
1141 * set only by the local CPU.
1144 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
1150 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
1151 * is required on 32bit architectures to read the variable consistently
1152 * with a concurrent writer. On 64bit the lock is not required because
1153 * the read operation is not split and so it is always consistent.
1155 if (IS_ENABLED(CONFIG_64BIT)) {
1156 if (data.now >= READ_ONCE(tmc->wakeup))
1159 raw_spin_lock(&tmc->lock);
1160 if (data.now >= tmc->wakeup)
1162 raw_spin_unlock(&tmc->lock);
1169 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1170 * @nextexp: Next expiry of global timer (or KTIME_MAX if not)
1172 * The CPU is already deactivated in the timer migration
1173 * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event()
1174 * and thereby the timer idle path is executed once more. @tmc->wakeup
1175 * holds the first timer, when the timer migration hierarchy is
1176 * completely idle.
1178 * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if
1179 * nothing needs to be done.
1181 u64 tmigr_cpu_new_timer(u64 nextexp)
1183 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1186 if (tmigr_is_not_available(tmc))
1189 raw_spin_lock(&tmc->lock);
1191 ret = READ_ONCE(tmc->wakeup);
1192 if (nextexp != KTIME_MAX) {
1193 if (nextexp != tmc->cpuevt.nextevt.expires ||
1194 tmc->cpuevt.ignore) {
1195 ret = tmigr_new_timer(tmc, nextexp);
1199 * Make sure the reevaluation of timers in idle path will not miss an
1200 * event.
1202 WRITE_ONCE(tmc->wakeup, ret);
1204 trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
1205 raw_spin_unlock(&tmc->lock);
1209 static bool tmigr_inactive_up(struct tmigr_group *group,
1210 struct tmigr_group *child,
1213 union tmigr_state curstate, newstate, childstate;
1214 struct tmigr_walk *data = ptr;
1218 childmask = data->childmask;
1219 childstate.state = 0;
1222 * The memory barrier is paired with the cmpxchg() in tmigr_active_up()
1223 * to make sure the updates of child and group states are ordered. The
1224 * ordering is mandatory, as the group state change depends on the child
1225 * state.
1227 curstate.state = atomic_read_acquire(&group->migr_state);
1231 childstate.state = atomic_read(&child->migr_state);
1233 newstate = curstate;
1236 /* Reset active bit when the child is no longer active */
1237 if (!childstate.active)
1238 newstate.active &= ~childmask;
1240 if (newstate.migrator == childmask) {
1242 * Find a new migrator for the group, because the child
1243 * group is idle!
1245 if (!childstate.active) {
1246 unsigned long new_migr_bit, active = newstate.active;
1248 new_migr_bit = find_first_bit(&active, BIT_CNT);
1250 if (new_migr_bit != BIT_CNT) {
1251 newstate.migrator = BIT(new_migr_bit);
1253 newstate.migrator = TMIGR_NONE;
1255 /* Changes need to be propagated */
1263 WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));
1265 if (atomic_try_cmpxchg(&group->migr_state, &curstate.state,
1270 * The memory barrier is paired with the cmpxchg() in
1271 * tmigr_active_up() to make sure the updates of child and group
1272 * states are ordered. It is required only when the above
1273 * try_cmpxchg() fails.
1275 smp_mb__after_atomic();
1278 data->remote = false;
1280 /* Event Handling */
1281 tmigr_update_events(group, child, data);
1283 if (group->parent && (walk_done == false))
1284 data->childmask = group->childmask;
1287 * data->firstexp was set by tmigr_update_events() and contains the
1288 * expiry of the first global event which needs to be handled. It
1289 * differs from KTIME_MAX if:
1290 * - group is the top level group and
1291 * - group is idle (which means the CPU was the last active CPU in the
1292 * hierarchy) and
1293 * - there is a pending event in the hierarchy
1295 WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent);
1297 trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
1302 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
1304 struct tmigr_walk data = { .nextexp = nextexp,
1305 .firstexp = KTIME_MAX,
1306 .evt = &tmc->cpuevt,
1307 .childmask = tmc->childmask };
1310 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
1311 * local timer expires before the global timer, no global timer is set
1312 * or CPU goes offline.
1314 if (nextexp != KTIME_MAX)
1315 tmc->cpuevt.ignore = false;
1317 walk_groups(&tmigr_inactive_up, &data, tmc);
1318 return data.firstexp;
1322 * tmigr_cpu_deactivate() - Put current CPU into inactive state
1323 * @nextexp: The next global timer expiry of the current CPU
1325 * Must be called with interrupts disabled.
1327 * Return: the next event expiry of the current CPU or the next event expiry
1328 * from the hierarchy if this CPU is the top level migrator or the hierarchy is
1329 * completely idle.
1331 u64 tmigr_cpu_deactivate(u64 nextexp)
1333 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1336 if (tmigr_is_not_available(tmc))
1339 raw_spin_lock(&tmc->lock);
1341 ret = __tmigr_cpu_deactivate(tmc, nextexp);
1346 * Make sure the reevaluation of timers in idle path will not miss an
1347 * event.
1349 WRITE_ONCE(tmc->wakeup, ret);
1351 trace_tmigr_cpu_idle(tmc, nextexp);
1352 raw_spin_unlock(&tmc->lock);
1357 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
1358 * go idle
1359 * @nextevt: The next global timer expiry of the current CPU
1362 * * KTIME_MAX - when it is probable that nothing has to be done (the CPU
1363 * is not the only one in its level 0 group; or it is the
1364 * only one in its level 0 group, but more than a single
1365 * group is active on the way to the top level)
1366 * * nextevt - when the CPU is offline and has to handle the timer on its
1367 * own or when on the way to the top in every group only a single
1368 * child is active but @nextevt is before the lowest
1369 * next_expiry encountered while walking up to the top level.
1370 * * next_expiry - value of lowest expiry encountered while walking groups
1371 * if only a single child is active on each and @nextevt
1372 * is after this lowest expiry.
1374 u64 tmigr_quick_check(u64 nextevt)
1376 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1377 struct tmigr_group *group = tmc->tmgroup;
1379 if (tmigr_is_not_available(tmc))
1382 if (WARN_ON_ONCE(tmc->idle))
1385 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
1389 if (!tmigr_check_lonely(group)) {
1393 * Since current CPU is active, events may not be sorted
1394 * from bottom to the top because the CPU's event is ignored
1395 * up to the top and its sibling's events are not propagated upwards.
1396 * Thus keep track of the lowest observed expiry.
1398 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry));
1402 group = group->parent;
1408 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
1411 union tmigr_state s;
1413 raw_spin_lock_init(&group->lock);
1416 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;
1418 group->num_children = 0;
1420 s.migrator = TMIGR_NONE;
1423 atomic_set(&group->migr_state, s.state);
1425 timerqueue_init_head(&group->events);
1426 timerqueue_init(&group->groupevt.nextevt);
1427 group->groupevt.nextevt.expires = KTIME_MAX;
1428 WRITE_ONCE(group->next_expiry, KTIME_MAX);
1429 group->groupevt.ignore = true;
1432 static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
1435 struct tmigr_group *tmp, *group = NULL;
1437 lockdep_assert_held(&tmigr_mutex);
1439 /* Try to attach to an existing group first */
1440 list_for_each_entry(tmp, &tmigr_level_list[lvl], list) {
1442 * If @lvl is below the cross NUMA node level, check whether
1443 * this group belongs to the same NUMA node.
1445 if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
1448 /* Capacity left? */
1449 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP)
1453 * TODO: A possible further improvement: Make sure that all CPU
1454 * siblings end up in the same group of the lowest level of the
1455 * hierarchy. Relying on the topology sibling mask would be a
1456 * reasonable solution.
1466 /* Allocate and set up a new group */
1467 group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
1469 return ERR_PTR(-ENOMEM);
1471 tmigr_init_group(group, lvl, node);
1473 /* Setup successful. Add it to the hierarchy */
1474 list_add(&group->list, &tmigr_level_list[lvl]);
1475 trace_tmigr_group_set(group);
1479 static void tmigr_connect_child_parent(struct tmigr_group *child,
1480 struct tmigr_group *parent)
1482 union tmigr_state childstate;
1484 raw_spin_lock_irq(&child->lock);
1485 raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
1487 child->parent = parent;
1488 child->childmask = BIT(parent->num_children++);
1490 raw_spin_unlock(&parent->lock);
1491 raw_spin_unlock_irq(&child->lock);
1493 trace_tmigr_connect_child_parent(child);
1496 * To prevent inconsistent states, active children need to be active in
1497 * the new parent as well. Inactive children are already marked inactive
1498 * in the parent group:
1500 * * When new groups were created by tmigr_setup_groups() starting from
1501 * the lowest level (and not higher than one level below the current
1502 * top level), then they are not active. They will be set active when
1503 * the new online CPU comes active.
1505 * * But if a new group above the current top level is required, it is
1506 * mandatory to propagate the active state of the already existing
1507 * child to the new parent. So tmigr_connect_child_parent() is
1508 * executed with the formerly top level group (child) and the newly
1509 * created group (parent).
1511 childstate.state = atomic_read(&child->migr_state);
1512 if (childstate.migrator != TMIGR_NONE) {
1513 struct tmigr_walk data;
1515 data.childmask = child->childmask;
1518 * Only one new level can be added at a time. When connecting the
1519 * child and the parent and setting the child active while the parent
1520 * is inactive, the parent needs to be the uppermost
1521 * level. Otherwise something went wrong!
1523 WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
1527 static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
1529 struct tmigr_group *group, *child, **stack;
1530 int top = 0, err = 0, i = 0;
1531 struct list_head *lvllist;
1533 stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
1538 group = tmigr_get_group(cpu, node, i);
1539 if (IS_ERR(group)) {
1540 err = PTR_ERR(group);
1548 * When booting with fewer CPUs than the system actually provides,
1549 * not all calculated hierarchy levels are required.
1550 *
1551 * The loop is aborted as soon as the highest level, which might
1552 * be different from tmigr_hierarchy_levels, contains only a
1553 * single group.
1555 if (group->parent || i == tmigr_hierarchy_levels ||
1556 (list_empty(&tmigr_level_list[i]) &&
1557 list_is_singular(&tmigr_level_list[i - 1])))
1560 } while (i < tmigr_hierarchy_levels);
1566 list_del(&group->list);
1571 WARN_ON_ONCE(i != group->level);
1574 * Update tmc -> group / child -> group connection
1577 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1579 raw_spin_lock_irq(&group->lock);
1581 tmc->tmgroup = group;
1582 tmc->childmask = BIT(group->num_children++);
1584 raw_spin_unlock_irq(&group->lock);
1586 trace_tmigr_connect_cpu_parent(tmc);
1588 /* There are no children that need to be connected */
1591 child = stack[i - 1];
1592 tmigr_connect_child_parent(child, group);
1595 /* check if uppermost level was newly created */
1599 WARN_ON_ONCE(top == 0);
1601 lvllist = &tmigr_level_list[top];
1602 if (group->num_children == 1 && list_is_singular(lvllist)) {
1603 lvllist = &tmigr_level_list[top - 1];
1604 list_for_each_entry(child, lvllist, list) {
1608 tmigr_connect_child_parent(child, group);
1618 static int tmigr_add_cpu(unsigned int cpu)
1620 int node = cpu_to_node(cpu);
1623 mutex_lock(&tmigr_mutex);
1624 ret = tmigr_setup_groups(cpu, node);
1625 mutex_unlock(&tmigr_mutex);
1630 static int tmigr_cpu_online(unsigned int cpu)
1632 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1635 /* First online attempt? Initialize CPU data */
1636 if (!tmc->tmgroup) {
1637 raw_spin_lock_init(&tmc->lock);
1639 ret = tmigr_add_cpu(cpu);
1643 if (tmc->childmask == 0)
1646 timerqueue_init(&tmc->cpuevt.nextevt);
1647 tmc->cpuevt.nextevt.expires = KTIME_MAX;
1648 tmc->cpuevt.ignore = true;
1649 tmc->cpuevt.cpu = cpu;
1651 tmc->remote = false;
1652 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1654 raw_spin_lock_irq(&tmc->lock);
1655 trace_tmigr_cpu_online(tmc);
1656 tmc->idle = timer_base_is_idle();
1658 __tmigr_cpu_activate(tmc);
1660 raw_spin_unlock_irq(&tmc->lock);
1665 * tmigr_trigger_active() - trigger a CPU to become active again
1667 * This function is executed on a CPU which is part of cpu_online_mask, when the
1668 * last active CPU in the hierarchy is offlining. With this, it is ensured that
1669 * the other CPU is active and takes over the migrator duty.
1671 static long tmigr_trigger_active(void *unused)
1673 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1675 WARN_ON_ONCE(!tmc->online || tmc->idle);
1680 static int tmigr_cpu_offline(unsigned int cpu)
1682 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1686 raw_spin_lock_irq(&tmc->lock);
1687 tmc->online = false;
1688 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1691 * The CPU has to handle its local events on its own when on the way to
1692 * offline; therefore the nextevt value is set to KTIME_MAX
1694 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
1695 trace_tmigr_cpu_offline(tmc);
1696 raw_spin_unlock_irq(&tmc->lock);
1698 if (firstexp != KTIME_MAX) {
1699 migrator = cpumask_any_but(cpu_online_mask, cpu);
1700 work_on_cpu(migrator, tmigr_trigger_active, NULL);
1706 static int __init tmigr_init(void)
1708 unsigned int cpulvl, nodelvl, cpus_per_node, i;
1709 unsigned int nnodes = num_possible_nodes();
1710 unsigned int ncpus = num_possible_cpus();
1713 BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP);
1715 /* Nothing to do if running on UP */
1720 * Calculate the required hierarchy levels. Unfortunately there is no
1721 * reliable information available, unless all possible CPUs have been
1722 * brought up and all NUMA nodes are populated.
1724 * Estimate the number of levels with the number of possible nodes and
1725 * the number of possible CPUs. Assume CPUs are spread evenly across
1726 * nodes. We cannot rely on cpumask_of_node() because it only works for
1727 * online CPUs.
1729 cpus_per_node = DIV_ROUND_UP(ncpus, nnodes);
1731 /* Calc the hierarchy levels required to hold the CPUs of a node */
1732 cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node),
1733 ilog2(TMIGR_CHILDREN_PER_GROUP));
1735 /* Calculate the extra levels to connect all nodes */
1736 nodelvl = DIV_ROUND_UP(order_base_2(nnodes),
1737 ilog2(TMIGR_CHILDREN_PER_GROUP));
1739 tmigr_hierarchy_levels = cpulvl + nodelvl;
1742 * If a NUMA node spawns more than one CPU level group then the next
1743 * level(s) of the hierarchy contain groups which handle all CPU groups
1744 * of the same NUMA node. The level above goes across NUMA nodes. Store
1745 * this information for the setup code to decide in which level node
1746 * matching is no longer required.
1748 tmigr_crossnode_level = cpulvl;
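	/*
	 * Worked example (illustrative, matching the two node, 48 CPU
	 * topology from the comment at the top of this file and assuming
	 * TMIGR_CHILDREN_PER_GROUP == 8): nnodes = 2, ncpus = 48, so
	 * cpus_per_node = 24. Then cpulvl = DIV_ROUND_UP(order_base_2(24), 3)
	 * = DIV_ROUND_UP(5, 3) = 2 and nodelvl = DIV_ROUND_UP(order_base_2(2), 3)
	 * = 1. This yields tmigr_hierarchy_levels = 3 (LVL0..LVL2) and
	 * tmigr_crossnode_level = 2, i.e. LVL0 and LVL1 stay per node and
	 * LVL2 connects the nodes.
	 */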
1750 tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL);
1751 if (!tmigr_level_list)
1754 for (i = 0; i < tmigr_hierarchy_levels; i++)
1755 INIT_LIST_HEAD(&tmigr_level_list[i]);
1757 pr_info("Timer migration: %d hierarchy levels; %d children per group;"
1758 " %d crossnode level\n",
1759 tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
1760 tmigr_crossnode_level);
1762 ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
1763 tmigr_cpu_online, tmigr_cpu_offline);
1770 pr_err("Timer migration setup failed\n");
1773 late_initcall(tmigr_init);