// SPDX-License-Identifier: GPL-2.0-only
/*
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
do {                                            \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
} while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);
        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;
        return do_div(nsec, 1000000);
}
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
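/*
 * Example (illustrative, not part of the upstream file): SPLIT_NS() expands
 * to the two helpers above so a nanosecond value can be printed as
 * "milliseconds.remainder" with a single format string:
 *
 *      u64 delta_ns = 1234567890ULL;
 *      SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(delta_ns));
 *      // prints "1234.567890": nsec_high() yields 1234 (ms),
 *      // nsec_low() yields 567890 (the sub-millisecond remainder in ns).
 */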
#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_printf(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_printf(m, "\n");

        return 0;
}
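/*
 * A rough sketch of what reading the features file produces (the feature
 * names below are only examples): enabled features are printed bare,
 * disabled ones get a "NO_" prefix, all on one space-separated line, e.g.
 *
 *      GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY ...
 */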
#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
        if (i < 0)
                return i;

        if (neg) {
                sysctl_sched_features &= ~(1UL << i);
                sched_feat_disable(i);
        } else {
                sysctl_sched_features |= (1UL << i);
                sched_feat_enable(i);
        }

        return 0;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                 size_t cnt, loff_t *ppos)
{
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);

        ret = sched_feat_set(cmp);
static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
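/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * "features" file created by sched_init_debug() below): a feature is
 * enabled by writing its name and disabled by writing the name with a
 * "NO_" prefix, e.g.
 *
 *      echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *      echo GENTLE_FAIR_SLEEPERS    > /sys/kernel/debug/sched/features
 */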
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
{
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
                return -EINVAL;

        if (sched_update_scaling())
                return -EINVAL;

        *ppos += cnt;
        return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
        return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
        .open           = sched_scaling_open,
        .write          = sched_scaling_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
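/*
 * Illustrative only: sysctl_sched_tunable_scaling is written and read back
 * as a plain decimal; the accepted values index
 * sched_tunable_scaling_names[] further down (0 = none, 1 = logarithmic,
 * 2 = linear), e.g.
 *
 *      echo 1 > /sys/kernel/debug/sched/tunable_scaling
 *      cat /sys/kernel/debug/sched/tunable_scaling    ->  1
 */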
#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
{
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        mode = sched_dynamic_mode(strstrip(buf));
        if (mode < 0)
                return mode;

        sched_dynamic_update(mode);

        *ppos += cnt;
        return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
        static const char * preempt_modes[] = {
                "none", "voluntary", "full"
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
                if (preempt_dynamic_mode == i)
                        seq_puts(m, "(");
                seq_puts(m, preempt_modes[i]);
                if (preempt_dynamic_mode == i)
                        seq_puts(m, ")");
                seq_puts(m, " ");
        }
        seq_puts(m, "\n");

        return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
        .open           = sched_dynamic_open,
        .write          = sched_dynamic_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */
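/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): reading
 * the "preempt" file lists all modes with the active one in parentheses,
 * and writing one of the names switches the dynamic preemption mode:
 *
 *      cat  /sys/kernel/debug/sched/preempt    ->  "none voluntary (full)"
 *      echo voluntary > /sys/kernel/debug/sched/preempt
 */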
__read_mostly bool sched_debug_enabled;

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
        struct dentry __maybe_unused *numa;

        debugfs_sched = debugfs_create_dir("sched", NULL);

        debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
        debugfs_create_bool("debug_enabled", 0644, debugfs_sched, &sched_debug_enabled);
#ifdef CONFIG_PREEMPT_DYNAMIC
        debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

        debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
        debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
        debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

#ifdef CONFIG_SMP
        debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
        debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
        debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
#endif

#ifdef CONFIG_NUMA_BALANCING
        numa = debugfs_create_dir("numa_balancing", debugfs_sched);

        debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
        debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
        debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
        debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

        return 0;
}
late_initcall(sched_init_debug);
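/*
 * Resulting layout under /sys/kernel/debug/sched/ (assuming debugfs is
 * mounted there; which entries exist depends on the config options above):
 *
 *      features  debug_enabled  preempt  latency_ns  min_granularity_ns
 *      wakeup_granularity_ns  tunable_scaling  migration_cost_ns  nr_migrate
 *      numa_balancing/{scan_delay_ms,scan_period_min_ms,
 *                      scan_period_max_ms,scan_size_mb}
 */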
#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL
static struct ctl_table sd_ctl_dir[] = {
        { .procname = "sched_domain", .mode = 0555, },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        { .procname = "kernel", .mode = 0555, .child = sd_ctl_dir, },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}
static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;
}
static int sd_ctl_doflags(struct ctl_table *table, int write,
                          void *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned long flags = *(unsigned long *)table->data;
        size_t data_size = 0;
        size_t len = 0;
        char *tmp, *buf;
        int idx;

        for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
                char *name = sd_flag_debug[idx].name;

                /* Name plus whitespace */
                data_size += strlen(name) + 1;
        }

        if (*ppos > data_size) {
                *lenp = 0;
                return 0;
        }

        buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);

        for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
                char *name = sd_flag_debug[idx].name;

                len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
        }

        tmp = buf + *ppos;
        if (len)
                memcpy(buffer, tmp, len);
        if (len < *lenp)
                ((char *)buffer)[len] = '\n';
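/*
 * Example of what the read side above produces (flag names come from
 * sd_flag_debug[] and vary by domain level; these are only an
 * illustration):
 *
 *      SD_BALANCE_NEWIDLE SD_BALANCE_EXEC SD_BALANCE_FORK SD_WAKE_AFFINE ...
 */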
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(9);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, sd_ctl_doflags);
        set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
        /* &table[8] is terminator */

        return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}
static cpumask_var_t            sd_sysctl_cpus;
static struct ctl_table_header  *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        static bool init_done = false;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
        }

        if (!init_done) {
                init_done = true;
                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                snprintf(buf, 32, "cpu%d", i);
                e->procname = kstrdup(buf, GFP_KERNEL);
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
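/*
 * The tables built above end up as a per-CPU, per-domain hierarchy under
 * procfs, e.g. (paths are illustrative; there is one "domainN" directory
 * per sched_domain level of that CPU):
 *
 *      /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *      /proc/sys/kernel/sched_domain/cpu0/domain1/flags
 */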
void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}

#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->avg.runnable_avg);
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (task_current(rq, p))
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S task PID tree-key switches prio"
                   " wait-time sum-exec sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "------------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}
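/*
 * Each runnable task on this runqueue is printed by print_task() above as
 * one row under that header; a made-up sample row for illustration:
 *
 *       R kworker/0:1 42 1234.567890 317 120 0.000000 56.789012 0.000000
 */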
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
                        cfs_rq->avg.runnable_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
                        cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

#ifdef CONFIG_SMP
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_uninterruptible);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));

#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(max_idle_balance_cost);

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
}
static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};
static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
}
static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu) {
                /*
                 * Need to reset softlockup watchdogs on all CPUs, because
                 * another CPU might be blocked waiting for us to process
                 * an IPI or stop_machine.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                print_cpu(NULL, cpu);
        }
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
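/*
 * Offset-to-record mapping used by the iterator above (illustrative):
 *
 *      *offset == 0  ->  (void *)1, the header record (sched_debug_header)
 *      *offset == 1  ->  first online CPU, returned as (cpu + 2)
 *      *offset == n  ->  next online CPU after the one at offset n-1
 *
 * sched_debug_show() undoes the +2 to recover the CPU number.
 */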
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
        if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
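/*
 * Illustrative output of the helpers above as used by proc_sched_show_task()
 * below (the real format pads the name to 45 characters and the value to 21,
 * wider than shown here; field values are made up):
 *
 *      se.sum_exec_runtime   :   1234.567890     <- PN/__PSN
 *      se.nr_migrations      :              3    <- P/__PS
 */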
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

        PN(se.sum_exec_runtime);
        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
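        /*
         * Worked example for the avg_atom/avg_per_cpu computation above
         * (made-up numbers): with sum_exec_runtime = 100,000,000 ns,
         * nr_switches = 50 and nr_migrations = 4, avg_atom is 2,000,000 ns
         * (2 ms of CPU time per scheduling atom) and avg_per_cpu is
         * 25,000,000 ns (25 ms of CPU time per migration). A zero divisor
         * yields -1 to mark the value as unavailable.
         */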
1119 __PS("nr_voluntary_switches", p->nvcsw);
1120 __PS("nr_involuntary_switches", p->nivcsw);
1125 P(se.avg.runnable_sum);
1128 P(se.avg.runnable_avg);
1130 P(se.avg.last_update_time);
1131 P(se.avg.util_est.ewma);
1132 P(se.avg.util_est.enqueued);
1134 #ifdef CONFIG_UCLAMP_TASK
1135 __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
1136 __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
1137 __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
1138 __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
        if (task_has_dl_policy(p)) {
                P(dl.runtime);
                P(dl.deadline);
        }

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                __PS("clock-delta", t1-t0);
        }

        sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
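/*
 * Note (context, not from this file): proc_sched_set_task() is the hook
 * invoked when /proc/<pid>/sched is written, so writing to that file clears
 * the task's accumulated schedstats shown by proc_sched_show_task() above.
 */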