// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

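/*
 * With SCHED_CAPACITY_SCALE = 1024, IOWAIT_BOOST_MIN works out to 128, i.e.
 * the first IO wakeup boosts utilization by one eighth of the CPU capacity.
 */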
#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct                  irq_work irq_work;
        struct                  kthread_work work;
        struct                  mutex work_lock;
        struct                  kthread_worker worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    limits_changed;
        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        u64                     last_update;

        unsigned long           bw_dl;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, the subsequent
         * frequency update may not work on fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * This is needed on slow switching platforms too, to prevent CPUs
         * going offline from leaving stale IRQ work items behind.
         */
        if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        int cpu;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
        if (!next_freq)
                return;

        policy->cur = next_freq;

        if (trace_cpu_frequency_enabled()) {
                for_each_cpu(cpu, policy->cpus)
                        trace_cpu_frequency(next_freq, cpu);
        }
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
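 *
 * Illustrative example (numbers assumed, not taken from the kernel): on a
 * frequency-invariant system with max_freq = 2000000 kHz, util = 614 and
 * max = 1024, the raw frequency is
 *
 *   1.25 * 2000000 * 614 / 1024 ~= 1499023 kHz (~1.5 GHz)
 *
 * which cpufreq_driver_resolve_freq() then maps to a driver-supported OPP.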
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilizations are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal time. These
 * latter are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters; it gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
                                 unsigned long max, enum schedutil_type type,
                                 struct task_struct *p)
{
        unsigned long dl_util, util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
            type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
                return max;
        }

        /*
         * Early check to see if IRQ/steal time saturates the CPU; this can
         * happen because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;

        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized), we can directly add
         * them to obtain the CPU's actual utilization.
         *
         * CFS and RT utilization can be boosted or capped, depending on
         * utilization clamp constraints requested by currently RUNNABLE
         * tasks.
         * When there are no CFS RUNNABLE tasks, clamps are released and
         * frequency will be gracefully reduced with the utilization decay.
         */
        util = util_cfs + cpu_util_rt(rq);
        if (type == FREQUENCY_UTIL)
                util = uclamp_util_with(rq, util, p);

        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent part
         * of this sum because we want to use cpu_bw_dl() later on, but we need
         * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
         * that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;

        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
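         *
         * For example (illustrative numbers only): with max = 1024,
         * irq = 128 and U = 512, U' = 128 + (896 / 1024) * 512 = 576.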
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;

        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
}

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util = cpu_util_cfs(rq);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a CPU is disabled after a tick has elapsed since its
 * last update. If a new IO wait boost is requested after more than a tick,
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
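 *
 * With SCHED_CAPACITY_SCALE = 1024, the boost therefore ramps through the
 * values 128, 256, 512 and 1024 over four sufficiently frequent IO wakeups
 * (an illustrative sequence, assuming no reset in between).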
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO,
 * while being more conservative with tasks that do only sporadic IO
 * operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
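         *
         * For instance (assumed numbers): on a CPU with max = 512 and
         * iowait_boost = 512, boost = (512 * 512) >> SCHED_CAPACITY_SHIFT
         * (i.e. >> 10) = 256.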
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
}

#ifdef CONFIG_NO_HZ_COMMON
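/*
 * A CPU is considered busy here if it has not entered the idle loop since the
 * last time this check ran on it, i.e. the nohz idle-calls counter has not
 * moved since the previously saved sample.
 */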
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->limits_changed = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        /* Limits may have changed, don't skip frequency update */
        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

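        /*
         * Pick the (util, max) pair with the highest util/max ratio among
         * the CPUs of this policy; the cross-multiplication below compares
         * the ratios without resorting to a division.
         */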
        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where, if
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here, we may miss queueing the new update.
         *
         * Note: If a work item was queued after the update_lock was released,
         * sugov_work() will just be called again by the kthread_work code; and
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}
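
/*
 * For example, writing 2000 to the governor's rate_limit_us attribute in
 * sysfs (typically under /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/
 * with per-policy tunables) sets freq_update_delay_ns to 2,000,000 ns, so
 * frequency updates are rate-limited to one per 2 ms.
 */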

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  =  1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time        = 0;
        sg_policy->next_freq                    = 0;
        sg_policy->work_in_progress             = false;
        sg_policy->limits_changed               = false;
        sg_policy->need_freq_update             = false;
        sg_policy->cached_raw_freq              = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu                     = cpu;
                sg_cpu->sg_policy               = sg_policy;
        }
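
        /*
         * Install the update hooks only after all of the policy's per-CPU
         * data has been initialized above: in the shared-policy case, a hook
         * running on one CPU may read another CPU's sugov_cpu state.
         */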
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
        .dynamic_switching      = true,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
        .stop                   = sugov_stop,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
core_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
        mutex_lock(&sched_energy_mutex);
        sched_energy_update = true;
        rebuild_sched_domains();
        sched_energy_update = false;
        mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                  struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif