1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/cpufreq/cpufreq.c
4  *
5  *  Copyright (C) 2001 Russell King
6  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8  *
9  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10  *      Added handling for CPU hotplug
11  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12  *      Fix handling for CPU hotplug -- affected CPUs
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/cpu.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_qos.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 static LIST_HEAD(cpufreq_policy_list);
34
35 /* Macros to iterate over CPU policies */
36 #define for_each_suitable_policy(__policy, __active)                     \
37         list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
38                 if ((__active) == !policy_is_inactive(__policy))
39
40 #define for_each_active_policy(__policy)                \
41         for_each_suitable_policy(__policy, true)
42 #define for_each_inactive_policy(__policy)              \
43         for_each_suitable_policy(__policy, false)
44
45 /* Iterate over governors */
46 static LIST_HEAD(cpufreq_governor_list);
47 #define for_each_governor(__governor)                           \
48         list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
49
50 static char default_governor[CPUFREQ_NAME_LEN];
51
52 /*
53  * The "cpufreq driver" - the arch- or hardware-dependent low
54  * level driver of CPUFreq support, and its spinlock. This lock
55  * also protects the cpufreq_cpu_data array.
56  */
57 static struct cpufreq_driver *cpufreq_driver;
58 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
59 static DEFINE_RWLOCK(cpufreq_driver_lock);
60
61 static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
62 bool cpufreq_supports_freq_invariance(void)
63 {
64         return static_branch_likely(&cpufreq_freq_invariance);
65 }
66
67 /* Flag to suspend/resume CPUFreq governors */
68 static bool cpufreq_suspended;
69
70 static inline bool has_target(void)
71 {
72         return cpufreq_driver->target_index || cpufreq_driver->target;
73 }
74
75 /* internal prototypes */
76 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
77 static int cpufreq_init_governor(struct cpufreq_policy *policy);
78 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
79 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
80 static int cpufreq_set_policy(struct cpufreq_policy *policy,
81                               struct cpufreq_governor *new_gov,
82                               unsigned int new_pol);
83
84 /*
85  * Two notifier lists: the "policy" list is involved in the
86  * validation process for a new CPU frequency policy; the
87  * "transition" list for kernel code that needs to handle
88  * changes to devices when the CPU clock speed changes.
89  * The mutex locks both lists.
90  */
91 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
92 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
93
94 static int off __read_mostly;
95 static int cpufreq_disabled(void)
96 {
97         return off;
98 }
99 void disable_cpufreq(void)
100 {
101         off = 1;
102 }
103 static DEFINE_MUTEX(cpufreq_governor_mutex);
104
105 bool have_governor_per_policy(void)
106 {
107         return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
108 }
109 EXPORT_SYMBOL_GPL(have_governor_per_policy);
110
111 static struct kobject *cpufreq_global_kobject;
112
113 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
114 {
115         if (have_governor_per_policy())
116                 return &policy->kobj;
117         else
118                 return cpufreq_global_kobject;
119 }
120 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
121
122 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
123 {
124         struct kernel_cpustat kcpustat;
125         u64 cur_wall_time;
126         u64 idle_time;
127         u64 busy_time;
128
129         cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
130
131         kcpustat_cpu_fetch(&kcpustat, cpu);
132
133         busy_time = kcpustat.cpustat[CPUTIME_USER];
134         busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
135         busy_time += kcpustat.cpustat[CPUTIME_IRQ];
136         busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
137         busy_time += kcpustat.cpustat[CPUTIME_STEAL];
138         busy_time += kcpustat.cpustat[CPUTIME_NICE];
139
140         idle_time = cur_wall_time - busy_time;
141         if (wall)
142                 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
143
144         return div_u64(idle_time, NSEC_PER_USEC);
145 }
146
147 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
148 {
149         u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
150
151         if (idle_time == -1ULL)
152                 return get_cpu_idle_time_jiffy(cpu, wall);
153         else if (!io_busy)
154                 idle_time += get_cpu_iowait_time_us(cpu, wall);
155
156         return idle_time;
157 }
158 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
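/*
 * Illustrative use (a sketch of how sampling governors such as ondemand
 * consume this helper; not code from this file): take two snapshots and
 * derive the load from the deltas. "prev_wall" and "prev_idle" are
 * hypothetical per-CPU bookkeeping variables.
 *
 *	u64 wall, idle, wall_delta, idle_delta;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *	wall_delta = wall - prev_wall;
 *	idle_delta = idle - prev_idle;
 *	load = wall_delta ? 100 * (wall_delta - idle_delta) / wall_delta : 0;
 */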
159
160 /*
161  * This is a generic cpufreq init() routine which can be used by cpufreq
162  * drivers of SMP systems. It does the following:
163  * - validates & shows the frequency table passed in
164  * - sets the policy's transition latency
165  * - fills policy->cpus with all possible CPUs
166  */
167 void cpufreq_generic_init(struct cpufreq_policy *policy,
168                 struct cpufreq_frequency_table *table,
169                 unsigned int transition_latency)
170 {
171         policy->freq_table = table;
172         policy->cpuinfo.transition_latency = transition_latency;
173
174         /*
175          * The driver only supports the SMP configuration where all processors
176          * share the clock and voltage.
177          */
178         cpumask_setall(policy->cpus);
179 }
180 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
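/*
 * Illustrative driver usage (a hedged sketch, not from this file): a simple
 * driver's ->init() callback forwards its frequency table and worst-case
 * transition latency (in nanoseconds) here. "foo_cpufreq_init" and
 * "foo_freq_table" are hypothetical names.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *		return 0;
 *	}
 */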
181
182 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
183 {
184         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
185
186         return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
187 }
188 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
189
190 unsigned int cpufreq_generic_get(unsigned int cpu)
191 {
192         struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
193
194         if (!policy || IS_ERR(policy->clk)) {
195                 pr_err("%s: No %s associated to cpu: %d\n",
196                        __func__, policy ? "clk" : "policy", cpu);
197                 return 0;
198         }
199
200         return clk_get_rate(policy->clk) / 1000;
201 }
202 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
203
204 /**
205  * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
206  * @cpu: CPU to find the policy for.
207  *
208  * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
209  * the kobject reference counter of that policy.  Return a valid policy on
210  * success or NULL on failure.
211  *
212  * The policy returned by this function has to be released with the help of
213  * cpufreq_cpu_put() to balance its kobject reference counter properly.
214  */
215 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
216 {
217         struct cpufreq_policy *policy = NULL;
218         unsigned long flags;
219
220         if (WARN_ON(cpu >= nr_cpu_ids))
221                 return NULL;
222
223         /* get the cpufreq driver */
224         read_lock_irqsave(&cpufreq_driver_lock, flags);
225
226         if (cpufreq_driver) {
227                 /* get the CPU */
228                 policy = cpufreq_cpu_get_raw(cpu);
229                 if (policy)
230                         kobject_get(&policy->kobj);
231         }
232
233         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
234
235         return policy;
236 }
237 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
238
239 /**
240  * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
241  * @policy: cpufreq policy returned by cpufreq_cpu_get().
242  */
243 void cpufreq_cpu_put(struct cpufreq_policy *policy)
244 {
245         kobject_put(&policy->kobj);
246 }
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
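/*
 * Typical usage (an illustrative sketch, not code from this file): a caller
 * that only needs to read a policy takes a reference and drops it when done,
 * keeping the get/put pair balanced.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u: max %u kHz\n", cpu, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */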
248
249 /**
250  * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
251  * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
252  */
253 void cpufreq_cpu_release(struct cpufreq_policy *policy)
254 {
255         if (WARN_ON(!policy))
256                 return;
257
258         lockdep_assert_held(&policy->rwsem);
259
260         up_write(&policy->rwsem);
261
262         cpufreq_cpu_put(policy);
263 }
264
265 /**
266  * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
267  * @cpu: CPU to find the policy for.
268  *
269  * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
270  * if the policy returned by it is not NULL, acquire its rwsem for writing.
271  * Return the policy if it is active or release it and return NULL otherwise.
272  *
273  * The policy returned by this function has to be released with the help of
274  * cpufreq_cpu_release() in order to release its rwsem and balance its usage
275  * counter properly.
276  */
277 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
278 {
279         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
280
281         if (!policy)
282                 return NULL;
283
284         down_write(&policy->rwsem);
285
286         if (policy_is_inactive(policy)) {
287                 cpufreq_cpu_release(policy);
288                 return NULL;
289         }
290
291         return policy;
292 }
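/*
 * Usage sketch (illustrative): cpufreq_cpu_acquire() takes both the reference
 * and the write lock, and cpufreq_cpu_release() drops both, so anything
 * between the two calls may safely modify the policy.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (!policy)
 *		return;
 *	... update the policy under policy->rwsem ...
 *	cpufreq_cpu_release(policy);
 */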
293
294 /*********************************************************************
295  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
296  *********************************************************************/
297
298 /**
299  * adjust_jiffies - Adjust the system "loops_per_jiffy".
300  * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
301  * @ci: Frequency change information.
302  *
303  * This function alters the system "loops_per_jiffy" for the clock
304  * speed change. Note that loops_per_jiffy cannot be updated on SMP
305  * systems as each CPU might be scaled differently. So, use the arch
306  * per-CPU loops_per_jiffy value wherever possible.
307  */
308 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
309 {
310 #ifndef CONFIG_SMP
311         static unsigned long l_p_j_ref;
312         static unsigned int l_p_j_ref_freq;
313
314         if (ci->flags & CPUFREQ_CONST_LOOPS)
315                 return;
316
317         if (!l_p_j_ref_freq) {
318                 l_p_j_ref = loops_per_jiffy;
319                 l_p_j_ref_freq = ci->old;
320                 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
321                          l_p_j_ref, l_p_j_ref_freq);
322         }
323         if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
324                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
325                                                                 ci->new);
326                 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
327                          loops_per_jiffy, ci->new);
328         }
329 #endif
330 }
331
332 /**
333  * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
334  * @policy: cpufreq policy the frequency transition applies to.
335  * @freqs: details of the frequency update.
336  * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
337  *
338  * This function calls the transition notifiers and adjust_jiffies().
339  *
340  * It is called twice on all CPU frequency changes that have external effects.
341  */
342 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
343                                       struct cpufreq_freqs *freqs,
344                                       unsigned int state)
345 {
346         int cpu;
347
348         BUG_ON(irqs_disabled());
349
350         if (cpufreq_disabled())
351                 return;
352
353         freqs->policy = policy;
354         freqs->flags = cpufreq_driver->flags;
355         pr_debug("notification %u of frequency transition to %u kHz\n",
356                  state, freqs->new);
357
358         switch (state) {
359         case CPUFREQ_PRECHANGE:
360                 /*
361                  * Detect if the driver reported a value as "old frequency"
362                  * which is not equal to what the cpufreq core thinks is
363                  * "old frequency".
364                  */
365                 if (policy->cur && policy->cur != freqs->old) {
366                         pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
367                                  freqs->old, policy->cur);
368                         freqs->old = policy->cur;
369                 }
370
371                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
372                                          CPUFREQ_PRECHANGE, freqs);
373
374                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
375                 break;
376
377         case CPUFREQ_POSTCHANGE:
378                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
379                 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
380                          cpumask_pr_args(policy->cpus));
381
382                 for_each_cpu(cpu, policy->cpus)
383                         trace_cpu_frequency(freqs->new, cpu);
384
385                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
386                                          CPUFREQ_POSTCHANGE, freqs);
387
388                 cpufreq_stats_record_transition(policy, freqs->new);
389                 policy->cur = freqs->new;
390         }
391 }
392
393 /* Do post notifications when there is a chance that the transition has failed */
394 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
395                 struct cpufreq_freqs *freqs, int transition_failed)
396 {
397         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
398         if (!transition_failed)
399                 return;
400
401         swap(freqs->old, freqs->new);
402         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
403         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
404 }
405
406 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
407                 struct cpufreq_freqs *freqs)
408 {
409
410         /*
411          * Catch double invocations of _begin() which lead to self-deadlock.
412          * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
413          * doesn't invoke _begin() on their behalf, and hence the chances of
414          * double invocations are very low. Moreover, there are scenarios
415          * where these checks can emit false-positive warnings in these
416          * drivers; so we avoid that by skipping them altogether.
417          */
418         WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
419                                 && current == policy->transition_task);
420
421 wait:
422         wait_event(policy->transition_wait, !policy->transition_ongoing);
423
424         spin_lock(&policy->transition_lock);
425
426         if (unlikely(policy->transition_ongoing)) {
427                 spin_unlock(&policy->transition_lock);
428                 goto wait;
429         }
430
431         policy->transition_ongoing = true;
432         policy->transition_task = current;
433
434         spin_unlock(&policy->transition_lock);
435
436         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
437 }
438 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
439
440 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
441                 struct cpufreq_freqs *freqs, int transition_failed)
442 {
443         if (WARN_ON(!policy->transition_ongoing))
444                 return;
445
446         cpufreq_notify_post_transition(policy, freqs, transition_failed);
447
448         arch_set_freq_scale(policy->related_cpus,
449                             policy->cur,
450                             policy->cpuinfo.max_freq);
451
452         policy->transition_ongoing = false;
453         policy->transition_task = NULL;
454
455         wake_up(&policy->transition_wait);
456 }
457 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
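/*
 * Pairing sketch (illustrative): the cpufreq core issues this begin/end pair
 * around ->target_index() itself, while drivers that implement ->target() or
 * set CPUFREQ_ASYNC_NOTIFICATION call it on their own. "set_hw_frequency"
 * and "target" are hypothetical names.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = set_hw_frequency(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */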
458
459 /*
460  * Fast frequency switching status count.  Positive means "enabled", negative
461  * means "disabled" and 0 means "not decided yet".
462  */
463 static int cpufreq_fast_switch_count;
464 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
465
466 static void cpufreq_list_transition_notifiers(void)
467 {
468         struct notifier_block *nb;
469
470         pr_info("Registered transition notifiers:\n");
471
472         mutex_lock(&cpufreq_transition_notifier_list.mutex);
473
474         for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
475                 pr_info("%pS\n", nb->notifier_call);
476
477         mutex_unlock(&cpufreq_transition_notifier_list.mutex);
478 }
479
480 /**
481  * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
482  * @policy: cpufreq policy to enable fast frequency switching for.
483  *
484  * Try to enable fast frequency switching for @policy.
485  *
486  * The attempt will fail if there is at least one transition notifier registered
487  * at this point, as fast frequency switching is quite fundamentally at odds
488  * with transition notifiers.  Thus if successful, it will make registration of
489  * transition notifiers fail going forward.
490  */
491 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
492 {
493         lockdep_assert_held(&policy->rwsem);
494
495         if (!policy->fast_switch_possible)
496                 return;
497
498         mutex_lock(&cpufreq_fast_switch_lock);
499         if (cpufreq_fast_switch_count >= 0) {
500                 cpufreq_fast_switch_count++;
501                 policy->fast_switch_enabled = true;
502         } else {
503                 pr_warn("CPU%u: Fast frequency switching not enabled\n",
504                         policy->cpu);
505                 cpufreq_list_transition_notifiers();
506         }
507         mutex_unlock(&cpufreq_fast_switch_lock);
508 }
509 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
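/*
 * Enabling sketch (illustrative, not from this file): a driver that can
 * switch frequencies without sleeping declares support from its ->init()
 * callback, and a governor such as schedutil then calls
 * cpufreq_enable_fast_switch() and, if policy->fast_switch_enabled ends up
 * true, uses cpufreq_driver_fast_switch() instead of the regular
 * ->target_index() path.
 *
 *	policy->fast_switch_possible = true;	// in the driver's ->init()
 */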
510
511 /**
512  * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
513  * @policy: cpufreq policy to disable fast frequency switching for.
514  */
515 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
516 {
517         mutex_lock(&cpufreq_fast_switch_lock);
518         if (policy->fast_switch_enabled) {
519                 policy->fast_switch_enabled = false;
520                 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
521                         cpufreq_fast_switch_count--;
522         }
523         mutex_unlock(&cpufreq_fast_switch_lock);
524 }
525 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
526
527 /**
528  * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
529  * one.
530  * @policy: associated policy to interrogate
531  * @target_freq: target frequency to resolve.
532  *
533  * The target to driver frequency mapping is cached in the policy.
534  *
535  * Return: Lowest driver-supported frequency greater than or equal to the
536  * given target_freq, subject to policy (min/max) and driver limitations.
537  */
538 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
539                                          unsigned int target_freq)
540 {
541         target_freq = clamp_val(target_freq, policy->min, policy->max);
542         policy->cached_target_freq = target_freq;
543
544         if (cpufreq_driver->target_index) {
545                 unsigned int idx;
546
547                 idx = cpufreq_frequency_table_target(policy, target_freq,
548                                                      CPUFREQ_RELATION_L);
549                 policy->cached_resolved_idx = idx;
550                 return policy->freq_table[idx].frequency;
551         }
552
553         if (cpufreq_driver->resolve_freq)
554                 return cpufreq_driver->resolve_freq(policy, target_freq);
555
556         return target_freq;
557 }
558 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
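/*
 * Usage sketch (illustrative): a governor that computes a raw target
 * frequency maps it to one the driver can actually set before requesting it.
 * "raw_target" is a hypothetical variable.
 *
 *	unsigned int next_freq = cpufreq_driver_resolve_freq(policy, raw_target);
 *
 * next_freq is clamped to policy->min/max and, for ->target_index() drivers,
 * snapped to the lowest table frequency >= the clamped target
 * (CPUFREQ_RELATION_L).
 */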
559
560 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
561 {
562         unsigned int latency;
563
564         if (policy->transition_delay_us)
565                 return policy->transition_delay_us;
566
567         latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
568         if (latency) {
569                 /*
570                  * For platforms that can change the frequency very fast (< 10
571          * us), the latency * LATENCY_MULTIPLIER formula below gives a decent
572          * transition delay. But for platforms where transition_latency is in
573          * milliseconds, it ends up giving unrealistic values.
574                  *
575                  * Cap the default transition delay to 10 ms, which seems to be
576                  * a reasonable amount of time after which we should reevaluate
577                  * the frequency.
578                  */
579                 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
580         }
581
582         return LATENCY_MULTIPLIER;
583 }
584 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
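/*
 * Worked example (assuming LATENCY_MULTIPLIER is 1000, as defined in
 * include/linux/cpufreq.h): a transition_latency of 5000 ns gives
 * latency = 5 us and a delay of 5 * 1000 = 5000 us (5 ms), whereas a
 * transition_latency of 500 us would give 500000 us and is therefore
 * capped to the 10000 us (10 ms) ceiling above.
 */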
585
586 /*********************************************************************
587  *                          SYSFS INTERFACE                          *
588  *********************************************************************/
589 static ssize_t show_boost(struct kobject *kobj,
590                           struct kobj_attribute *attr, char *buf)
591 {
592         return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
593 }
594
595 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
596                            const char *buf, size_t count)
597 {
598         int ret, enable;
599
600         ret = sscanf(buf, "%d", &enable);
601         if (ret != 1 || enable < 0 || enable > 1)
602                 return -EINVAL;
603
604         if (cpufreq_boost_trigger_state(enable)) {
605                 pr_err("%s: Cannot %s BOOST!\n",
606                        __func__, enable ? "enable" : "disable");
607                 return -EINVAL;
608         }
609
610         pr_debug("%s: cpufreq BOOST %s\n",
611                  __func__, enable ? "enabled" : "disabled");
612
613         return count;
614 }
615 define_one_global_rw(boost);
616
617 static struct cpufreq_governor *find_governor(const char *str_governor)
618 {
619         struct cpufreq_governor *t;
620
621         for_each_governor(t)
622                 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
623                         return t;
624
625         return NULL;
626 }
627
628 static struct cpufreq_governor *get_governor(const char *str_governor)
629 {
630         struct cpufreq_governor *t;
631
632         mutex_lock(&cpufreq_governor_mutex);
633         t = find_governor(str_governor);
634         if (!t)
635                 goto unlock;
636
637         if (!try_module_get(t->owner))
638                 t = NULL;
639
640 unlock:
641         mutex_unlock(&cpufreq_governor_mutex);
642
643         return t;
644 }
645
646 static unsigned int cpufreq_parse_policy(char *str_governor)
647 {
648         if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
649                 return CPUFREQ_POLICY_PERFORMANCE;
650
651         if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
652                 return CPUFREQ_POLICY_POWERSAVE;
653
654         return CPUFREQ_POLICY_UNKNOWN;
655 }
656
657 /**
658  * cpufreq_parse_governor - parse a governor string only for has_target()
659  * @str_governor: Governor name.
660  */
661 static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
662 {
663         struct cpufreq_governor *t;
664
665         t = get_governor(str_governor);
666         if (t)
667                 return t;
668
669         if (request_module("cpufreq_%s", str_governor))
670                 return NULL;
671
672         return get_governor(str_governor);
673 }
674
675 /*
676  * cpufreq_per_cpu_attr_read() / show_##file_name() -
677  * print out cpufreq information
678  *
679  * Write out information from cpufreq_driver->policy[cpu]; object must be
680  * "unsigned int".
681  */
682
683 #define show_one(file_name, object)                     \
684 static ssize_t show_##file_name                         \
685 (struct cpufreq_policy *policy, char *buf)              \
686 {                                                       \
687         return sprintf(buf, "%u\n", policy->object);    \
688 }
689
690 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
691 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
692 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
693 show_one(scaling_min_freq, min);
694 show_one(scaling_max_freq, max);
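/*
 * For reference, show_one(scaling_min_freq, min) above expands to:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */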
695
696 __weak unsigned int arch_freq_get_on_cpu(int cpu)
697 {
698         return 0;
699 }
700
701 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
702 {
703         ssize_t ret;
704         unsigned int freq;
705
706         freq = arch_freq_get_on_cpu(policy->cpu);
707         if (freq)
708                 ret = sprintf(buf, "%u\n", freq);
709         else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
710                 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
711         else
712                 ret = sprintf(buf, "%u\n", policy->cur);
713         return ret;
714 }
715
716 /*
717  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
718  */
719 #define store_one(file_name, object)                    \
720 static ssize_t store_##file_name                                        \
721 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
722 {                                                                       \
723         unsigned long val;                                              \
724         int ret;                                                        \
725                                                                         \
726         ret = sscanf(buf, "%lu", &val);                                 \
727         if (ret != 1)                                                   \
728                 return -EINVAL;                                         \
729                                                                         \
730         ret = freq_qos_update_request(policy->object##_freq_req, val);\
731         return ret >= 0 ? count : ret;                                  \
732 }
733
734 store_one(scaling_min_freq, min);
735 store_one(scaling_max_freq, max);
736
737 /*
738  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
739  */
740 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
741                                         char *buf)
742 {
743         unsigned int cur_freq = __cpufreq_get(policy);
744
745         if (cur_freq)
746                 return sprintf(buf, "%u\n", cur_freq);
747
748         return sprintf(buf, "<unknown>\n");
749 }
750
751 /*
752  * show_scaling_governor - show the current policy for the specified CPU
753  */
754 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
755 {
756         if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
757                 return sprintf(buf, "powersave\n");
758         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
759                 return sprintf(buf, "performance\n");
760         else if (policy->governor)
761                 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
762                                 policy->governor->name);
763         return -EINVAL;
764 }
765
766 /*
767  * store_scaling_governor - store policy for the specified CPU
768  */
769 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
770                                         const char *buf, size_t count)
771 {
772         char str_governor[16];
773         int ret;
774
775         ret = sscanf(buf, "%15s", str_governor);
776         if (ret != 1)
777                 return -EINVAL;
778
779         if (cpufreq_driver->setpolicy) {
780                 unsigned int new_pol;
781
782                 new_pol = cpufreq_parse_policy(str_governor);
783                 if (!new_pol)
784                         return -EINVAL;
785
786                 ret = cpufreq_set_policy(policy, NULL, new_pol);
787         } else {
788                 struct cpufreq_governor *new_gov;
789
790                 new_gov = cpufreq_parse_governor(str_governor);
791                 if (!new_gov)
792                         return -EINVAL;
793
794                 ret = cpufreq_set_policy(policy, new_gov,
795                                          CPUFREQ_POLICY_UNKNOWN);
796
797                 module_put(new_gov->owner);
798         }
799
800         return ret ? ret : count;
801 }
802
803 /*
804  * show_scaling_driver - show the cpufreq driver currently loaded
805  */
806 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
807 {
808         return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
809 }
810
811 /*
812  * show_scaling_available_governors - show the available CPUfreq governors
813  */
814 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
815                                                 char *buf)
816 {
817         ssize_t i = 0;
818         struct cpufreq_governor *t;
819
820         if (!has_target()) {
821                 i += sprintf(buf, "performance powersave");
822                 goto out;
823         }
824
825         mutex_lock(&cpufreq_governor_mutex);
826         for_each_governor(t) {
827                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
828                     - (CPUFREQ_NAME_LEN + 2)))
829                         break;
830                 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
831         }
832         mutex_unlock(&cpufreq_governor_mutex);
833 out:
834         i += sprintf(&buf[i], "\n");
835         return i;
836 }
837
838 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
839 {
840         ssize_t i = 0;
841         unsigned int cpu;
842
843         for_each_cpu(cpu, mask) {
844                 if (i)
845                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
846                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
847                 if (i >= (PAGE_SIZE - 5))
848                         break;
849         }
850         i += sprintf(&buf[i], "\n");
851         return i;
852 }
853 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
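/*
 * Output sketch: for a mask containing CPUs 0-3 this yields the string
 * "0 1 2 3\n". It backs show_related_cpus() and show_affected_cpus() below.
 */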
854
855 /*
856  * show_related_cpus - show the CPUs affected by each transition even if
857  * hw coordination is in use
858  */
859 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
860 {
861         return cpufreq_show_cpus(policy->related_cpus, buf);
862 }
863
864 /*
865  * show_affected_cpus - show the CPUs affected by each transition
866  */
867 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
868 {
869         return cpufreq_show_cpus(policy->cpus, buf);
870 }
871
872 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
873                                         const char *buf, size_t count)
874 {
875         unsigned int freq = 0;
876         unsigned int ret;
877
878         if (!policy->governor || !policy->governor->store_setspeed)
879                 return -EINVAL;
880
881         ret = sscanf(buf, "%u", &freq);
882         if (ret != 1)
883                 return -EINVAL;
884
885         policy->governor->store_setspeed(policy, freq);
886
887         return count;
888 }
889
890 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
891 {
892         if (!policy->governor || !policy->governor->show_setspeed)
893                 return sprintf(buf, "<unsupported>\n");
894
895         return policy->governor->show_setspeed(policy, buf);
896 }
897
898 /*
899  * show_bios_limit - show the current cpufreq HW/BIOS limitation
900  */
901 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
902 {
903         unsigned int limit;
904         int ret;
905         ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
906         if (!ret)
907                 return sprintf(buf, "%u\n", limit);
908         return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
909 }
910
911 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
912 cpufreq_freq_attr_ro(cpuinfo_min_freq);
913 cpufreq_freq_attr_ro(cpuinfo_max_freq);
914 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
915 cpufreq_freq_attr_ro(scaling_available_governors);
916 cpufreq_freq_attr_ro(scaling_driver);
917 cpufreq_freq_attr_ro(scaling_cur_freq);
918 cpufreq_freq_attr_ro(bios_limit);
919 cpufreq_freq_attr_ro(related_cpus);
920 cpufreq_freq_attr_ro(affected_cpus);
921 cpufreq_freq_attr_rw(scaling_min_freq);
922 cpufreq_freq_attr_rw(scaling_max_freq);
923 cpufreq_freq_attr_rw(scaling_governor);
924 cpufreq_freq_attr_rw(scaling_setspeed);
925
926 static struct attribute *default_attrs[] = {
927         &cpuinfo_min_freq.attr,
928         &cpuinfo_max_freq.attr,
929         &cpuinfo_transition_latency.attr,
930         &scaling_min_freq.attr,
931         &scaling_max_freq.attr,
932         &affected_cpus.attr,
933         &related_cpus.attr,
934         &scaling_governor.attr,
935         &scaling_driver.attr,
936         &scaling_available_governors.attr,
937         &scaling_setspeed.attr,
938         NULL
939 };
940
941 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
942 #define to_attr(a) container_of(a, struct freq_attr, attr)
943
944 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
945 {
946         struct cpufreq_policy *policy = to_policy(kobj);
947         struct freq_attr *fattr = to_attr(attr);
948         ssize_t ret;
949
950         if (!fattr->show)
951                 return -EIO;
952
953         down_read(&policy->rwsem);
954         ret = fattr->show(policy, buf);
955         up_read(&policy->rwsem);
956
957         return ret;
958 }
959
960 static ssize_t store(struct kobject *kobj, struct attribute *attr,
961                      const char *buf, size_t count)
962 {
963         struct cpufreq_policy *policy = to_policy(kobj);
964         struct freq_attr *fattr = to_attr(attr);
965         ssize_t ret = -EINVAL;
966
967         if (!fattr->store)
968                 return -EIO;
969
970         /*
971          * cpus_read_trylock() is used here to work around a circular lock
972          * dependency problem with respect to the cpufreq_register_driver().
973          */
974         if (!cpus_read_trylock())
975                 return -EBUSY;
976
977         if (cpu_online(policy->cpu)) {
978                 down_write(&policy->rwsem);
979                 ret = fattr->store(policy, buf, count);
980                 up_write(&policy->rwsem);
981         }
982
983         cpus_read_unlock();
984
985         return ret;
986 }
987
988 static void cpufreq_sysfs_release(struct kobject *kobj)
989 {
990         struct cpufreq_policy *policy = to_policy(kobj);
991         pr_debug("last reference is dropped\n");
992         complete(&policy->kobj_unregister);
993 }
994
995 static const struct sysfs_ops sysfs_ops = {
996         .show   = show,
997         .store  = store,
998 };
999
1000 static struct kobj_type ktype_cpufreq = {
1001         .sysfs_ops      = &sysfs_ops,
1002         .default_attrs  = default_attrs,
1003         .release        = cpufreq_sysfs_release,
1004 };
1005
1006 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
1007 {
1008         struct device *dev = get_cpu_device(cpu);
1009
1010         if (unlikely(!dev))
1011                 return;
1012
1013         if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1014                 return;
1015
1016         dev_dbg(dev, "%s: Adding symlink\n", __func__);
1017         if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1018                 dev_err(dev, "cpufreq symlink creation failed\n");
1019 }
1020
1021 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1022                                    struct device *dev)
1023 {
1024         dev_dbg(dev, "%s: Removing symlink\n", __func__);
1025         sysfs_remove_link(&dev->kobj, "cpufreq");
1026 }
1027
1028 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1029 {
1030         struct freq_attr **drv_attr;
1031         int ret = 0;
1032
1033         /* set up files for this cpu device */
1034         drv_attr = cpufreq_driver->attr;
1035         while (drv_attr && *drv_attr) {
1036                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1037                 if (ret)
1038                         return ret;
1039                 drv_attr++;
1040         }
1041         if (cpufreq_driver->get) {
1042                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1043                 if (ret)
1044                         return ret;
1045         }
1046
1047         ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1048         if (ret)
1049                 return ret;
1050
1051         if (cpufreq_driver->bios_limit) {
1052                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1053                 if (ret)
1054                         return ret;
1055         }
1056
1057         return 0;
1058 }
1059
1060 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1061 {
1062         struct cpufreq_governor *gov = NULL;
1063         unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1064         int ret;
1065
1066         if (has_target()) {
1067                 /* Update policy governor to the one used before hotplug. */
1068                 gov = get_governor(policy->last_governor);
1069                 if (gov) {
1070                         pr_debug("Restoring governor %s for cpu %d\n",
1071                                  gov->name, policy->cpu);
1072                 } else {
1073                         gov = get_governor(default_governor);
1074                 }
1075
1076                 if (!gov) {
1077                         gov = cpufreq_default_governor();
1078                         __module_get(gov->owner);
1079                 }
1080
1081         } else {
1082
1083                 /* Use the default policy if there is no last_policy. */
1084                 if (policy->last_policy) {
1085                         pol = policy->last_policy;
1086                 } else {
1087                         pol = cpufreq_parse_policy(default_governor);
1088                         /*
1089                          * In case the default governor is neither "performance"
1090                          * nor "powersave", fall back to the initial policy
1091                          * value set by the driver.
1092                          */
1093                         if (pol == CPUFREQ_POLICY_UNKNOWN)
1094                                 pol = policy->policy;
1095                 }
1096                 if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1097                     pol != CPUFREQ_POLICY_POWERSAVE)
1098                         return -ENODATA;
1099         }
1100
1101         ret = cpufreq_set_policy(policy, gov, pol);
1102         if (gov)
1103                 module_put(gov->owner);
1104
1105         return ret;
1106 }
1107
1108 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1109 {
1110         int ret = 0;
1111
1112         /* Has this CPU been taken care of already? */
1113         if (cpumask_test_cpu(cpu, policy->cpus))
1114                 return 0;
1115
1116         down_write(&policy->rwsem);
1117         if (has_target())
1118                 cpufreq_stop_governor(policy);
1119
1120         cpumask_set_cpu(cpu, policy->cpus);
1121
1122         if (has_target()) {
1123                 ret = cpufreq_start_governor(policy);
1124                 if (ret)
1125                         pr_err("%s: Failed to start governor\n", __func__);
1126         }
1127         up_write(&policy->rwsem);
1128         return ret;
1129 }
1130
1131 void refresh_frequency_limits(struct cpufreq_policy *policy)
1132 {
1133         if (!policy_is_inactive(policy)) {
1134                 pr_debug("updating policy for CPU %u\n", policy->cpu);
1135
1136                 cpufreq_set_policy(policy, policy->governor, policy->policy);
1137         }
1138 }
1139 EXPORT_SYMBOL(refresh_frequency_limits);
1140
1141 static void handle_update(struct work_struct *work)
1142 {
1143         struct cpufreq_policy *policy =
1144                 container_of(work, struct cpufreq_policy, update);
1145
1146         pr_debug("handle_update for cpu %u called\n", policy->cpu);
1147         down_write(&policy->rwsem);
1148         refresh_frequency_limits(policy);
1149         up_write(&policy->rwsem);
1150 }
1151
1152 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1153                                 void *data)
1154 {
1155         struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1156
1157         schedule_work(&policy->update);
1158         return 0;
1159 }
1160
1161 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1162                                 void *data)
1163 {
1164         struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1165
1166         schedule_work(&policy->update);
1167         return 0;
1168 }
1169
1170 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1171 {
1172         struct kobject *kobj;
1173         struct completion *cmp;
1174
1175         down_write(&policy->rwsem);
1176         cpufreq_stats_free_table(policy);
1177         kobj = &policy->kobj;
1178         cmp = &policy->kobj_unregister;
1179         up_write(&policy->rwsem);
1180         kobject_put(kobj);
1181
1182         /*
1183          * We need to make sure that the underlying kobj is
1184          * actually not referenced anymore by anybody before we
1185          * proceed with unloading.
1186          */
1187         pr_debug("waiting for dropping of refcount\n");
1188         wait_for_completion(cmp);
1189         pr_debug("wait complete\n");
1190 }
1191
1192 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1193 {
1194         struct cpufreq_policy *policy;
1195         struct device *dev = get_cpu_device(cpu);
1196         int ret;
1197
1198         if (!dev)
1199                 return NULL;
1200
1201         policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1202         if (!policy)
1203                 return NULL;
1204
1205         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1206                 goto err_free_policy;
1207
1208         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1209                 goto err_free_cpumask;
1210
1211         if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1212                 goto err_free_rcpumask;
1213
1214         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1215                                    cpufreq_global_kobject, "policy%u", cpu);
1216         if (ret) {
1217                 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1218                 /*
1219                  * The entire policy object will be freed below, but the extra
1220                  * memory allocated for the kobject name needs to be freed by
1221                  * releasing the kobject.
1222                  */
1223                 kobject_put(&policy->kobj);
1224                 goto err_free_real_cpus;
1225         }
1226
1227         freq_constraints_init(&policy->constraints);
1228
1229         policy->nb_min.notifier_call = cpufreq_notifier_min;
1230         policy->nb_max.notifier_call = cpufreq_notifier_max;
1231
1232         ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1233                                     &policy->nb_min);
1234         if (ret) {
1235                 dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1236                         ret, cpumask_pr_args(policy->cpus));
1237                 goto err_kobj_remove;
1238         }
1239
1240         ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1241                                     &policy->nb_max);
1242         if (ret) {
1243                 dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1244                         ret, cpumask_pr_args(policy->cpus));
1245                 goto err_min_qos_notifier;
1246         }
1247
1248         INIT_LIST_HEAD(&policy->policy_list);
1249         init_rwsem(&policy->rwsem);
1250         spin_lock_init(&policy->transition_lock);
1251         init_waitqueue_head(&policy->transition_wait);
1252         init_completion(&policy->kobj_unregister);
1253         INIT_WORK(&policy->update, handle_update);
1254
1255         policy->cpu = cpu;
1256         return policy;
1257
1258 err_min_qos_notifier:
1259         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1260                                  &policy->nb_min);
1261 err_kobj_remove:
1262         cpufreq_policy_put_kobj(policy);
1263 err_free_real_cpus:
1264         free_cpumask_var(policy->real_cpus);
1265 err_free_rcpumask:
1266         free_cpumask_var(policy->related_cpus);
1267 err_free_cpumask:
1268         free_cpumask_var(policy->cpus);
1269 err_free_policy:
1270         kfree(policy);
1271
1272         return NULL;
1273 }
1274
1275 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1276 {
1277         unsigned long flags;
1278         int cpu;
1279
1280         /* Remove policy from list */
1281         write_lock_irqsave(&cpufreq_driver_lock, flags);
1282         list_del(&policy->policy_list);
1283
1284         for_each_cpu(cpu, policy->related_cpus)
1285                 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1286         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1287
1288         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1289                                  &policy->nb_max);
1290         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1291                                  &policy->nb_min);
1292
1293         /* Cancel any pending policy->update work before freeing the policy. */
1294         cancel_work_sync(&policy->update);
1295
1296         if (policy->max_freq_req) {
1297                 /*
1298                  * CPUFREQ_CREATE_POLICY notification is sent only after
1299                  * successfully adding max_freq_req request.
1300                  */
1301                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1302                                              CPUFREQ_REMOVE_POLICY, policy);
1303                 freq_qos_remove_request(policy->max_freq_req);
1304         }
1305
1306         freq_qos_remove_request(policy->min_freq_req);
1307         kfree(policy->min_freq_req);
1308
1309         cpufreq_policy_put_kobj(policy);
1310         free_cpumask_var(policy->real_cpus);
1311         free_cpumask_var(policy->related_cpus);
1312         free_cpumask_var(policy->cpus);
1313         kfree(policy);
1314 }
1315
1316 static int cpufreq_online(unsigned int cpu)
1317 {
1318         struct cpufreq_policy *policy;
1319         bool new_policy;
1320         unsigned long flags;
1321         unsigned int j;
1322         int ret;
1323
1324         pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1325
1326         /* Check if this CPU already has a policy to manage it */
1327         policy = per_cpu(cpufreq_cpu_data, cpu);
1328         if (policy) {
1329                 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1330                 if (!policy_is_inactive(policy))
1331                         return cpufreq_add_policy_cpu(policy, cpu);
1332
1333                 /* This is the only online CPU for the policy.  Start over. */
1334                 new_policy = false;
1335                 down_write(&policy->rwsem);
1336                 policy->cpu = cpu;
1337                 policy->governor = NULL;
1338                 up_write(&policy->rwsem);
1339         } else {
1340                 new_policy = true;
1341                 policy = cpufreq_policy_alloc(cpu);
1342                 if (!policy)
1343                         return -ENOMEM;
1344         }
1345
1346         if (!new_policy && cpufreq_driver->online) {
1347                 ret = cpufreq_driver->online(policy);
1348                 if (ret) {
1349                         pr_debug("%s: %d: initialization failed\n", __func__,
1350                                  __LINE__);
1351                         goto out_exit_policy;
1352                 }
1353
1354                 /* Recover policy->cpus using related_cpus */
1355                 cpumask_copy(policy->cpus, policy->related_cpus);
1356         } else {
1357                 cpumask_copy(policy->cpus, cpumask_of(cpu));
1358
1359                 /*
1360                  * Call driver. From then on the cpufreq must be able
1361                  * to accept all calls to ->verify and ->setpolicy for this CPU.
1362                  */
1363                 ret = cpufreq_driver->init(policy);
1364                 if (ret) {
1365                         pr_debug("%s: %d: initialization failed\n", __func__,
1366                                  __LINE__);
1367                         goto out_free_policy;
1368                 }
1369
1370                 ret = cpufreq_table_validate_and_sort(policy);
1371                 if (ret)
1372                         goto out_exit_policy;
1373
1374                 /* related_cpus should at least include policy->cpus. */
1375                 cpumask_copy(policy->related_cpus, policy->cpus);
1376         }
1377
1378         down_write(&policy->rwsem);
1379         /*
1380          * Affected CPUs must always be the ones that are online. We aren't
1381          * managing offline CPUs here.
1382          */
1383         cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1384
1385         if (new_policy) {
1386                 for_each_cpu(j, policy->related_cpus) {
1387                         per_cpu(cpufreq_cpu_data, j) = policy;
1388                         add_cpu_dev_symlink(policy, j);
1389                 }
1390
1391                 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1392                                                GFP_KERNEL);
1393                 if (!policy->min_freq_req) {
1394                         ret = -ENOMEM;
1395                         goto out_destroy_policy;
1396                 }
1397
1398                 ret = freq_qos_add_request(&policy->constraints,
1399                                            policy->min_freq_req, FREQ_QOS_MIN,
1400                                            policy->min);
1401                 if (ret < 0) {
1402                         /*
1403                          * So we don't call freq_qos_remove_request() for an
1404                          * uninitialized request.
1405                          */
1406                         kfree(policy->min_freq_req);
1407                         policy->min_freq_req = NULL;
1408                         goto out_destroy_policy;
1409                 }
1410
1411                 /*
1412                  * This must be initialized right here to avoid calling
1413                  * freq_qos_remove_request() on uninitialized request in case
1414                  * of errors.
1415                  */
1416                 policy->max_freq_req = policy->min_freq_req + 1;
1417
1418                 ret = freq_qos_add_request(&policy->constraints,
1419                                            policy->max_freq_req, FREQ_QOS_MAX,
1420                                            policy->max);
1421                 if (ret < 0) {
1422                         policy->max_freq_req = NULL;
1423                         goto out_destroy_policy;
1424                 }
1425
1426                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1427                                 CPUFREQ_CREATE_POLICY, policy);
1428         }
1429
1430         if (cpufreq_driver->get && has_target()) {
1431                 policy->cur = cpufreq_driver->get(policy->cpu);
1432                 if (!policy->cur) {
1433                         ret = -EIO;
1434                         pr_err("%s: ->get() failed\n", __func__);
1435                         goto out_destroy_policy;
1436                 }
1437         }
1438
1439         /*
1440          * Sometimes boot loaders set the CPU frequency to a value outside of
1441          * the frequency table known to the cpufreq core. In such cases the CPU
1442          * might be unstable if it has to run at that frequency for a long time,
1443          * so it is better to set it to a frequency that is specified in the
1444          * frequency table. This also makes cpufreq stats inconsistent, as
1445          * cpufreq-stats would fail to register because the current frequency of
1446          * the CPU is not found in the frequency table.
1447          *
1448          * Because we don't want this change to affect the boot process badly, we
1449          * go for the next frequency which is >= policy->cur ('cur' must be set
1450          * by now, otherwise we will end up setting the frequency to the lowest
1451          * entry of the table, as 'cur' is initialized to zero).
1452          *
1453          * We pass the target frequency as "policy->cur - 1", as otherwise
1454          * __cpufreq_driver_target() would simply fail, because policy->cur would
1455          * be equal to the target frequency.
1456          */
1457         if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1458             && has_target()) {
1459                 unsigned int old_freq = policy->cur;
1460
1461                 /* Are we running at unknown frequency ? */
1462                 ret = cpufreq_frequency_table_get_index(policy, old_freq);
1463                 if (ret == -EINVAL) {
1464                         ret = __cpufreq_driver_target(policy, old_freq - 1,
1465                                                       CPUFREQ_RELATION_L);
1466
1467                         /*
1468                          * Reaching here after boot in a few seconds may not
1469                          * mean that system will remain stable at "unknown"
1470                          * frequency for longer duration. Hence, a BUG_ON().
1471                          */
1472                         BUG_ON(ret);
1473                         pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
1474                                 __func__, policy->cpu, old_freq, policy->cur);
1475                 }
1476         }
1477
1478         if (new_policy) {
1479                 ret = cpufreq_add_dev_interface(policy);
1480                 if (ret)
1481                         goto out_destroy_policy;
1482
1483                 cpufreq_stats_create_table(policy);
1484
1485                 write_lock_irqsave(&cpufreq_driver_lock, flags);
1486                 list_add(&policy->policy_list, &cpufreq_policy_list);
1487                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1488         }
1489
1490         ret = cpufreq_init_policy(policy);
1491         if (ret) {
1492                 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1493                        __func__, cpu, ret);
1494                 goto out_destroy_policy;
1495         }
1496
1497         up_write(&policy->rwsem);
1498
1499         kobject_uevent(&policy->kobj, KOBJ_ADD);
1500
1501         /* Callback for the driver to finish setup after the policy is ready */
1502         if (cpufreq_driver->ready)
1503                 cpufreq_driver->ready(policy);
1504
1505         if (cpufreq_thermal_control_enabled(cpufreq_driver))
1506                 policy->cdev = of_cpufreq_cooling_register(policy);
1507
1508         pr_debug("initialization complete\n");
1509
1510         return 0;
1511
1512 out_destroy_policy:
1513         for_each_cpu(j, policy->real_cpus)
1514                 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1515
1516         up_write(&policy->rwsem);
1517
1518 out_exit_policy:
1519         if (cpufreq_driver->exit)
1520                 cpufreq_driver->exit(policy);
1521
1522 out_free_policy:
1523         cpufreq_policy_free(policy);
1524         return ret;
1525 }
1526
1527 /**
1528  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1529  * @dev: CPU device.
1530  * @sif: Subsystem interface structure pointer (not used)
1531  */
1532 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1533 {
1534         struct cpufreq_policy *policy;
1535         unsigned cpu = dev->id;
1536         int ret;
1537
1538         dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1539
1540         if (cpu_online(cpu)) {
1541                 ret = cpufreq_online(cpu);
1542                 if (ret)
1543                         return ret;
1544         }
1545
1546         /* Create sysfs link on CPU registration */
1547         policy = per_cpu(cpufreq_cpu_data, cpu);
1548         if (policy)
1549                 add_cpu_dev_symlink(policy, cpu);
1550
1551         return 0;
1552 }
1553
1554 static int cpufreq_offline(unsigned int cpu)
1555 {
1556         struct cpufreq_policy *policy;
1557         int ret;
1558
1559         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1560
1561         policy = cpufreq_cpu_get_raw(cpu);
1562         if (!policy) {
1563                 pr_debug("%s: No cpu_data found\n", __func__);
1564                 return 0;
1565         }
1566
1567         down_write(&policy->rwsem);
1568         if (has_target())
1569                 cpufreq_stop_governor(policy);
1570
1571         cpumask_clear_cpu(cpu, policy->cpus);
1572
1573         if (policy_is_inactive(policy)) {
1574                 if (has_target())
1575                         strncpy(policy->last_governor, policy->governor->name,
1576                                 CPUFREQ_NAME_LEN);
1577                 else
1578                         policy->last_policy = policy->policy;
1579         } else if (cpu == policy->cpu) {
1580                 /* Nominate new CPU */
1581                 policy->cpu = cpumask_any(policy->cpus);
1582         }
1583
1584         /* Start governor again for active policy */
1585         if (!policy_is_inactive(policy)) {
1586                 if (has_target()) {
1587                         ret = cpufreq_start_governor(policy);
1588                         if (ret)
1589                                 pr_err("%s: Failed to start governor\n", __func__);
1590                 }
1591
1592                 goto unlock;
1593         }
1594
1595         if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1596                 cpufreq_cooling_unregister(policy->cdev);
1597                 policy->cdev = NULL;
1598         }
1599
1600         if (cpufreq_driver->stop_cpu)
1601                 cpufreq_driver->stop_cpu(policy);
1602
1603         if (has_target())
1604                 cpufreq_exit_governor(policy);
1605
1606         /*
1607          * Perform the ->offline() during light-weight tear-down, as
1608          * that allows fast recovery when the CPU comes back.
1609          */
1610         if (cpufreq_driver->offline) {
1611                 cpufreq_driver->offline(policy);
1612         } else if (cpufreq_driver->exit) {
1613                 cpufreq_driver->exit(policy);
1614                 policy->freq_table = NULL;
1615         }
1616
1617 unlock:
1618         up_write(&policy->rwsem);
1619         return 0;
1620 }
1621
1622 /*
1623  * cpufreq_remove_dev - remove a CPU device
1624  *
1625  * Removes the cpufreq interface for a CPU device.
1626  */
1627 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1628 {
1629         unsigned int cpu = dev->id;
1630         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1631
1632         if (!policy)
1633                 return;
1634
1635         if (cpu_online(cpu))
1636                 cpufreq_offline(cpu);
1637
1638         cpumask_clear_cpu(cpu, policy->real_cpus);
1639         remove_cpu_dev_symlink(policy, dev);
1640
1641         if (cpumask_empty(policy->real_cpus)) {
1642                 /* We did light-weight exit earlier, do full tear down now */
1643                 if (cpufreq_driver->offline)
1644                         cpufreq_driver->exit(policy);
1645
1646                 cpufreq_policy_free(policy);
1647         }
1648 }
1649
1650 /**
1651  * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1652  * @policy: Policy managing CPUs.
1653  * @new_freq: New CPU frequency.
1654  *
1655  * Adjust to the current frequency first and clean up later by either calling
1656  * cpufreq_update_policy(), or scheduling handle_update().
1657  */
1658 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1659                                 unsigned int new_freq)
1660 {
1661         struct cpufreq_freqs freqs;
1662
1663         pr_debug("Warning: CPU frequency out of sync: cpufreq core thinks it is %u kHz, but it is %u kHz\n",
1664                  policy->cur, new_freq);
1665
1666         freqs.old = policy->cur;
1667         freqs.new = new_freq;
1668
1669         cpufreq_freq_transition_begin(policy, &freqs);
1670         cpufreq_freq_transition_end(policy, &freqs, 0);
1671 }
1672
1673 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1674 {
1675         unsigned int new_freq;
1676
1677         new_freq = cpufreq_driver->get(policy->cpu);
1678         if (!new_freq)
1679                 return 0;
1680
1681         /*
1682          * If fast frequency switching is used with the given policy, the check
1683          * against policy->cur is pointless, so skip it in that case.
1684          */
1685         if (policy->fast_switch_enabled || !has_target())
1686                 return new_freq;
1687
1688         if (policy->cur != new_freq) {
1689                 cpufreq_out_of_sync(policy, new_freq);
1690                 if (update)
1691                         schedule_work(&policy->update);
1692         }
1693
1694         return new_freq;
1695 }
1696
1697 /**
1698  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1699  * @cpu: CPU number
1700  *
1701  * This is the last known freq, without actually getting it from the driver.
1702  * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1703  */
1704 unsigned int cpufreq_quick_get(unsigned int cpu)
1705 {
1706         struct cpufreq_policy *policy;
1707         unsigned int ret_freq = 0;
1708         unsigned long flags;
1709
1710         read_lock_irqsave(&cpufreq_driver_lock, flags);
1711
1712         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1713                 ret_freq = cpufreq_driver->get(cpu);
1714                 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1715                 return ret_freq;
1716         }
1717
1718         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1719
1720         policy = cpufreq_cpu_get(cpu);
1721         if (policy) {
1722                 ret_freq = policy->cur;
1723                 cpufreq_cpu_put(policy);
1724         }
1725
1726         return ret_freq;
1727 }
1728 EXPORT_SYMBOL(cpufreq_quick_get);
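
/*
 * Illustrative sketch (not part of this file): reading the cached frequency
 * from a context that must not poke the hardware. The "example_" name below
 * is hypothetical.
 */
static void example_report_cached_freq(unsigned int cpu)
{
        unsigned int khz = cpufreq_quick_get(cpu);

        if (khz)
                pr_info("CPU%u last known frequency: %u kHz\n", cpu, khz);
        else
                pr_info("CPU%u: no cpufreq policy available\n", cpu);
}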
1729
1730 /**
1731  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1732  * @cpu: CPU number
1733  *
1734  * Just return the max possible frequency for a given CPU.
1735  */
1736 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1737 {
1738         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1739         unsigned int ret_freq = 0;
1740
1741         if (policy) {
1742                 ret_freq = policy->max;
1743                 cpufreq_cpu_put(policy);
1744         }
1745
1746         return ret_freq;
1747 }
1748 EXPORT_SYMBOL(cpufreq_quick_get_max);
1749
1750 /**
1751  * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1752  * @cpu: CPU number
1753  *
1754  * The default return value is the max_freq field of cpuinfo.
1755  */
1756 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1757 {
1758         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1759         unsigned int ret_freq = 0;
1760
1761         if (policy) {
1762                 ret_freq = policy->cpuinfo.max_freq;
1763                 cpufreq_cpu_put(policy);
1764         }
1765
1766         return ret_freq;
1767 }
1768 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1769
1770 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1771 {
1772         if (unlikely(policy_is_inactive(policy)))
1773                 return 0;
1774
1775         return cpufreq_verify_current_freq(policy, true);
1776 }
1777
1778 /**
1779  * cpufreq_get - get the current CPU frequency (in kHz)
1780  * @cpu: CPU number
1781  *
1782  * Get the current CPU frequency as reported by the cpufreq driver.
1783  */
1784 unsigned int cpufreq_get(unsigned int cpu)
1785 {
1786         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1787         unsigned int ret_freq = 0;
1788
1789         if (policy) {
1790                 down_read(&policy->rwsem);
1791                 if (cpufreq_driver->get)
1792                         ret_freq = __cpufreq_get(policy);
1793                 up_read(&policy->rwsem);
1794
1795                 cpufreq_cpu_put(policy);
1796         }
1797
1798         return ret_freq;
1799 }
1800 EXPORT_SYMBOL(cpufreq_get);
1801
1802 static struct subsys_interface cpufreq_interface = {
1803         .name           = "cpufreq",
1804         .subsys         = &cpu_subsys,
1805         .add_dev        = cpufreq_add_dev,
1806         .remove_dev     = cpufreq_remove_dev,
1807 };
1808
1809 /*
1810  * Generic suspend handler for platforms that want a specific frequency to be
1811  * configured during suspend.
1812  */
1813 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1814 {
1815         int ret;
1816
1817         if (!policy->suspend_freq) {
1818                 pr_debug("%s: suspend_freq not defined\n", __func__);
1819                 return 0;
1820         }
1821
1822         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1823                         policy->suspend_freq);
1824
1825         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1826                         CPUFREQ_RELATION_H);
1827         if (ret)
1828                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1829                                 __func__, policy->suspend_freq, ret);
1830
1831         return ret;
1832 }
1833 EXPORT_SYMBOL(cpufreq_generic_suspend);
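
/*
 * Illustrative sketch (not part of this file): a driver that wants the CPU
 * pinned to a fixed frequency across suspend can set policy->suspend_freq in
 * its ->init() callback and reuse cpufreq_generic_suspend() as its ->suspend()
 * hook. The "example_" names and the 800000 kHz value are hypothetical, and
 * the mandatory ->verify/->target_index callbacks are omitted for brevity.
 */
static int example_suspend_init(struct cpufreq_policy *policy)
{
        /* hypothetical: frequency table setup would go here in a real driver */
        policy->suspend_freq = 800000;          /* kHz */
        return 0;
}

static struct cpufreq_driver example_suspend_driver = {
        .name           = "example",
        .init           = example_suspend_init,
        .suspend        = cpufreq_generic_suspend,
        /* ->verify and ->target_index omitted; see cpufreq_register_driver() */
};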
1834
1835 /**
1836  * cpufreq_suspend() - Suspend CPUFreq governors.
1837  *
1838  * Called during system-wide suspend/hibernate cycles to suspend governors,
1839  * as some platforms can't change the frequency after this point in the
1840  * suspend cycle, because some of the devices (e.g. i2c, regulators) they use
1841  * for changing the frequency are suspended shortly after this point.
1842  */
1843 void cpufreq_suspend(void)
1844 {
1845         struct cpufreq_policy *policy;
1846
1847         if (!cpufreq_driver)
1848                 return;
1849
1850         if (!has_target() && !cpufreq_driver->suspend)
1851                 goto suspend;
1852
1853         pr_debug("%s: Suspending Governors\n", __func__);
1854
1855         for_each_active_policy(policy) {
1856                 if (has_target()) {
1857                         down_write(&policy->rwsem);
1858                         cpufreq_stop_governor(policy);
1859                         up_write(&policy->rwsem);
1860                 }
1861
1862                 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1863                         pr_err("%s: Failed to suspend driver: %s\n", __func__,
1864                                 cpufreq_driver->name);
1865         }
1866
1867 suspend:
1868         cpufreq_suspended = true;
1869 }
1870
1871 /**
1872  * cpufreq_resume() - Resume CPUFreq governors.
1873  *
1874  * Called during system-wide suspend/hibernate cycles to resume governors that
1875  * were suspended with cpufreq_suspend().
1876  */
1877 void cpufreq_resume(void)
1878 {
1879         struct cpufreq_policy *policy;
1880         int ret;
1881
1882         if (!cpufreq_driver)
1883                 return;
1884
1885         if (unlikely(!cpufreq_suspended))
1886                 return;
1887
1888         cpufreq_suspended = false;
1889
1890         if (!has_target() && !cpufreq_driver->resume)
1891                 return;
1892
1893         pr_debug("%s: Resuming Governors\n", __func__);
1894
1895         for_each_active_policy(policy) {
1896                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1897                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1898                                 policy);
1899                 } else if (has_target()) {
1900                         down_write(&policy->rwsem);
1901                         ret = cpufreq_start_governor(policy);
1902                         up_write(&policy->rwsem);
1903
1904                         if (ret)
1905                                 pr_err("%s: Failed to start governor for policy: %p\n",
1906                                        __func__, policy);
1907                 }
1908         }
1909 }
1910
1911 /**
1912  * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
1913  * @flags: Flags to test against the current cpufreq driver's flags.
1914  *
1915  * Assumes that the driver is there, so callers must ensure that this is the
1916  * case.
1917  */
1918 bool cpufreq_driver_test_flags(u16 flags)
1919 {
1920         return !!(cpufreq_driver->flags & flags);
1921 }
1922
1923 /**
1924  * cpufreq_get_current_driver - Return the current driver's name.
1925  *
1926  * Return the name string of the currently registered cpufreq driver or NULL if
1927  * none.
1928  */
1929 const char *cpufreq_get_current_driver(void)
1930 {
1931         if (cpufreq_driver)
1932                 return cpufreq_driver->name;
1933
1934         return NULL;
1935 }
1936 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1937
1938 /**
1939  * cpufreq_get_driver_data - Return current driver data.
1940  *
1941  * Return the private data of the currently registered cpufreq driver, or NULL
1942  * if no cpufreq driver has been registered.
1943  */
1944 void *cpufreq_get_driver_data(void)
1945 {
1946         if (cpufreq_driver)
1947                 return cpufreq_driver->driver_data;
1948
1949         return NULL;
1950 }
1951 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1952
1953 /*********************************************************************
1954  *                     NOTIFIER LISTS INTERFACE                      *
1955  *********************************************************************/
1956
1957 /**
1958  * cpufreq_register_notifier - Register a notifier with cpufreq.
1959  * @nb: notifier function to register.
1960  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
1961  *
1962  * Add a notifier to one of two lists: either a list of notifiers that run on
1963  * clock rate changes (once before and once after every transition), or a list
1964  * of notifiers that run on cpufreq policy changes.
1965  *
1966  * This function may sleep and it has the same return values as
1967  * blocking_notifier_chain_register().
1968  */
1969 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1970 {
1971         int ret;
1972
1973         if (cpufreq_disabled())
1974                 return -EINVAL;
1975
1976         switch (list) {
1977         case CPUFREQ_TRANSITION_NOTIFIER:
1978                 mutex_lock(&cpufreq_fast_switch_lock);
1979
1980                 if (cpufreq_fast_switch_count > 0) {
1981                         mutex_unlock(&cpufreq_fast_switch_lock);
1982                         return -EBUSY;
1983                 }
1984                 ret = srcu_notifier_chain_register(
1985                                 &cpufreq_transition_notifier_list, nb);
1986                 if (!ret)
1987                         cpufreq_fast_switch_count--;
1988
1989                 mutex_unlock(&cpufreq_fast_switch_lock);
1990                 break;
1991         case CPUFREQ_POLICY_NOTIFIER:
1992                 ret = blocking_notifier_chain_register(
1993                                 &cpufreq_policy_notifier_list, nb);
1994                 break;
1995         default:
1996                 ret = -EINVAL;
1997         }
1998
1999         return ret;
2000 }
2001 EXPORT_SYMBOL(cpufreq_register_notifier);
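
/*
 * Illustrative sketch (not part of this file): a transition notifier that
 * logs completed frequency changes. The "example_" names are hypothetical;
 * note that CPUFREQ_TRANSITION_NOTIFIER registration fails with -EBUSY while
 * fast frequency switching is in use (see above).
 */
static int example_transition_cb(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (event == CPUFREQ_POSTCHANGE)
                pr_info("frequency changed: %u -> %u kHz\n",
                        freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
        .notifier_call = example_transition_cb,
};

static int __init example_notifier_init(void)
{
        return cpufreq_register_notifier(&example_transition_nb,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}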
2002
2003 /**
2004  * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2005  * @nb: notifier block to be unregistered.
2006  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2007  *
2008  * Remove a notifier from one of the cpufreq notifier lists.
2009  *
2010  * This function may sleep and it has the same return values as
2011  * blocking_notifier_chain_unregister().
2012  */
2013 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2014 {
2015         int ret;
2016
2017         if (cpufreq_disabled())
2018                 return -EINVAL;
2019
2020         switch (list) {
2021         case CPUFREQ_TRANSITION_NOTIFIER:
2022                 mutex_lock(&cpufreq_fast_switch_lock);
2023
2024                 ret = srcu_notifier_chain_unregister(
2025                                 &cpufreq_transition_notifier_list, nb);
2026                 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2027                         cpufreq_fast_switch_count++;
2028
2029                 mutex_unlock(&cpufreq_fast_switch_lock);
2030                 break;
2031         case CPUFREQ_POLICY_NOTIFIER:
2032                 ret = blocking_notifier_chain_unregister(
2033                                 &cpufreq_policy_notifier_list, nb);
2034                 break;
2035         default:
2036                 ret = -EINVAL;
2037         }
2038
2039         return ret;
2040 }
2041 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2042
2043
2044 /*********************************************************************
2045  *                              GOVERNORS                            *
2046  *********************************************************************/
2047
2048 /**
2049  * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2050  * @policy: cpufreq policy to switch the frequency for.
2051  * @target_freq: New frequency to set (may be approximate).
2052  *
2053  * Carry out a fast frequency switch without sleeping.
2054  *
2055  * The driver's ->fast_switch() callback invoked by this function must be
2056  * suitable for being called from within RCU-sched read-side critical sections
2057  * and it is expected to select the minimum available frequency greater than or
2058  * equal to @target_freq (CPUFREQ_RELATION_L).
2059  *
2060  * This function must not be called if policy->fast_switch_enabled is unset.
2061  *
2062  * Governors calling this function must guarantee that it will never be invoked
2063  * twice in parallel for the same policy and that it will never be called in
2064  * parallel with either ->target() or ->target_index() for the same policy.
2065  *
2066  * Returns the actual frequency set for the CPU.
2067  *
2068  * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2069  * error condition, the hardware configuration must be preserved.
2070  */
2071 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2072                                         unsigned int target_freq)
2073 {
2074         unsigned int freq;
2075         int cpu;
2076
2077         target_freq = clamp_val(target_freq, policy->min, policy->max);
2078         freq = cpufreq_driver->fast_switch(policy, target_freq);
2079
2080         if (!freq)
2081                 return 0;
2082
2083         policy->cur = freq;
2084         arch_set_freq_scale(policy->related_cpus, freq,
2085                             policy->cpuinfo.max_freq);
2086         cpufreq_stats_record_transition(policy, freq);
2087
2088         if (trace_cpu_frequency_enabled()) {
2089                 for_each_cpu(cpu, policy->cpus)
2090                         trace_cpu_frequency(freq, cpu);
2091         }
2092
2093         return freq;
2094 }
2095 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
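
/*
 * Illustrative sketch (not part of this file): how a governor might use fast
 * switching from its non-sleeping update path and fall back to the regular
 * target interface otherwise. The "example_" name and the way next_freq is
 * obtained are hypothetical.
 */
static void example_gov_update(struct cpufreq_policy *policy,
                               unsigned int next_freq)
{
        if (policy->fast_switch_enabled) {
                /* non-sleeping context, e.g. a scheduler update hook */
                cpufreq_driver_fast_switch(policy, next_freq);
        } else {
                /* sleeping context; cpufreq_driver_target() takes policy->rwsem */
                cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
        }
}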
2096
2097 /**
2098  * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2099  * @cpu: Target CPU.
2100  * @min_perf: Minimum (required) performance level (units of @capacity).
2101  * @target_perf: Target (desired) performance level (units of @capacity).
2102  * @capacity: Capacity of the target CPU.
2103  *
2104  * Carry out a fast performance level switch of @cpu without sleeping.
2105  *
2106  * The driver's ->adjust_perf() callback invoked by this function must be
2107  * suitable for being called from within RCU-sched read-side critical sections
2108  * and it is expected to select a suitable performance level equal to or above
2109  * @min_perf and preferably equal to or below @target_perf.
2110  *
2111  * This function must not be called if policy->fast_switch_enabled is unset.
2112  *
2113  * Governors calling this function must guarantee that it will never be invoked
2114  * twice in parallel for the same CPU and that it will never be called in
2115  * parallel with either ->target() or ->target_index() or ->fast_switch() for
2116  * the same CPU.
2117  */
2118 void cpufreq_driver_adjust_perf(unsigned int cpu,
2119                                  unsigned long min_perf,
2120                                  unsigned long target_perf,
2121                                  unsigned long capacity)
2122 {
2123         cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2124 }
2125
2126 /**
2127  * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2128  *
2129  * Return 'true' if the ->adjust_perf callback is present for the
2130  * current driver or 'false' otherwise.
2131  */
2132 bool cpufreq_driver_has_adjust_perf(void)
2133 {
2134         return !!cpufreq_driver->adjust_perf;
2135 }
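
/*
 * Illustrative sketch (not part of this file): a governor combining the two
 * helpers above. The "example_" name and the util/capacity values are
 * hypothetical; in practice they come from the scheduler.
 */
static void example_adjust(unsigned int cpu, unsigned long util,
                           unsigned long capacity)
{
        if (!cpufreq_driver_has_adjust_perf())
                return;

        /* request at least half of the desired performance level */
        cpufreq_driver_adjust_perf(cpu, util / 2, util, capacity);
}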
2136
2137 /* Must set freqs->new to intermediate frequency */
2138 static int __target_intermediate(struct cpufreq_policy *policy,
2139                                  struct cpufreq_freqs *freqs, int index)
2140 {
2141         int ret;
2142
2143         freqs->new = cpufreq_driver->get_intermediate(policy, index);
2144
2145         /* We don't need to switch to intermediate freq */
2146         if (!freqs->new)
2147                 return 0;
2148
2149         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2150                  __func__, policy->cpu, freqs->old, freqs->new);
2151
2152         cpufreq_freq_transition_begin(policy, freqs);
2153         ret = cpufreq_driver->target_intermediate(policy, index);
2154         cpufreq_freq_transition_end(policy, freqs, ret);
2155
2156         if (ret)
2157                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
2158                        __func__, ret);
2159
2160         return ret;
2161 }
2162
2163 static int __target_index(struct cpufreq_policy *policy, int index)
2164 {
2165         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2166         unsigned int restore_freq, intermediate_freq = 0;
2167         unsigned int newfreq = policy->freq_table[index].frequency;
2168         int retval = -EINVAL;
2169         bool notify;
2170
2171         if (newfreq == policy->cur)
2172                 return 0;
2173
2174         /* Save last value to restore later on errors */
2175         restore_freq = policy->cur;
2176
2177         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2178         if (notify) {
2179                 /* Handle switching to intermediate frequency */
2180                 if (cpufreq_driver->get_intermediate) {
2181                         retval = __target_intermediate(policy, &freqs, index);
2182                         if (retval)
2183                                 return retval;
2184
2185                         intermediate_freq = freqs.new;
2186                         /* Set old freq to intermediate */
2187                         if (intermediate_freq)
2188                                 freqs.old = freqs.new;
2189                 }
2190
2191                 freqs.new = newfreq;
2192                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2193                          __func__, policy->cpu, freqs.old, freqs.new);
2194
2195                 cpufreq_freq_transition_begin(policy, &freqs);
2196         }
2197
2198         retval = cpufreq_driver->target_index(policy, index);
2199         if (retval)
2200                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2201                        retval);
2202
2203         if (notify) {
2204                 cpufreq_freq_transition_end(policy, &freqs, retval);
2205
2206                 /*
2207                  * Failed after setting to intermediate freq? Driver should have
2208                  * reverted back to initial frequency and so should we. Check
2209                  * here for intermediate_freq instead of get_intermediate, in
2210                  * case we haven't switched to intermediate freq at all.
2211                  */
2212                 if (unlikely(retval && intermediate_freq)) {
2213                         freqs.old = intermediate_freq;
2214                         freqs.new = restore_freq;
2215                         cpufreq_freq_transition_begin(policy, &freqs);
2216                         cpufreq_freq_transition_end(policy, &freqs, 0);
2217                 }
2218         }
2219
2220         return retval;
2221 }
2222
2223 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2224                             unsigned int target_freq,
2225                             unsigned int relation)
2226 {
2227         unsigned int old_target_freq = target_freq;
2228         int index;
2229
2230         if (cpufreq_disabled())
2231                 return -ENODEV;
2232
2233         /* Make sure that target_freq is within supported range */
2234         target_freq = clamp_val(target_freq, policy->min, policy->max);
2235
2236         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2237                  policy->cpu, target_freq, relation, old_target_freq);
2238
2239         /*
2240          * This might look like a redundant call, as we are checking it again
2241          * after finding the index. But it is left intentionally for cases where
2242          * exactly the same frequency is requested again, so that we can save a
2243          * few function calls.
2244          */
2245         if (target_freq == policy->cur &&
2246             !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2247                 return 0;
2248
2249         if (cpufreq_driver->target)
2250                 return cpufreq_driver->target(policy, target_freq, relation);
2251
2252         if (!cpufreq_driver->target_index)
2253                 return -EINVAL;
2254
2255         index = cpufreq_frequency_table_target(policy, target_freq, relation);
2256
2257         return __target_index(policy, index);
2258 }
2259 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2260
2261 int cpufreq_driver_target(struct cpufreq_policy *policy,
2262                           unsigned int target_freq,
2263                           unsigned int relation)
2264 {
2265         int ret;
2266
2267         down_write(&policy->rwsem);
2268
2269         ret = __cpufreq_driver_target(policy, target_freq, relation);
2270
2271         up_write(&policy->rwsem);
2272
2273         return ret;
2274 }
2275 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
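
/*
 * Illustrative sketch (not part of this file): requesting a specific
 * frequency from process context. CPUFREQ_RELATION_L selects the lowest
 * table frequency at or above the request, CPUFREQ_RELATION_H the highest at
 * or below it. The "example_" name and the 1 GHz value are hypothetical.
 */
static int example_request_1ghz(struct cpufreq_policy *policy)
{
        /* takes policy->rwsem internally, so this may sleep */
        return cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
}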
2276
2277 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2278 {
2279         return NULL;
2280 }
2281
2282 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2283 {
2284         int ret;
2285
2286         /* Don't start any governor operations if we are entering suspend */
2287         if (cpufreq_suspended)
2288                 return 0;
2289         /*
2290          * The governor might not be initialized here if an ACPI _PPC change
2291          * notification has happened, so check it.
2292          */
2293         if (!policy->governor)
2294                 return -EINVAL;
2295
2296         /* Platform doesn't want dynamic frequency switching ? */
2297         if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2298             cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2299                 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2300
2301                 if (gov) {
2302                         pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2303                                 policy->governor->name, gov->name);
2304                         policy->governor = gov;
2305                 } else {
2306                         return -EINVAL;
2307                 }
2308         }
2309
2310         if (!try_module_get(policy->governor->owner))
2311                 return -EINVAL;
2312
2313         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2314
2315         if (policy->governor->init) {
2316                 ret = policy->governor->init(policy);
2317                 if (ret) {
2318                         module_put(policy->governor->owner);
2319                         return ret;
2320                 }
2321         }
2322
2323         policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2324
2325         return 0;
2326 }
2327
2328 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2329 {
2330         if (cpufreq_suspended || !policy->governor)
2331                 return;
2332
2333         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2334
2335         if (policy->governor->exit)
2336                 policy->governor->exit(policy);
2337
2338         module_put(policy->governor->owner);
2339 }
2340
2341 int cpufreq_start_governor(struct cpufreq_policy *policy)
2342 {
2343         int ret;
2344
2345         if (cpufreq_suspended)
2346                 return 0;
2347
2348         if (!policy->governor)
2349                 return -EINVAL;
2350
2351         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2352
2353         if (cpufreq_driver->get)
2354                 cpufreq_verify_current_freq(policy, false);
2355
2356         if (policy->governor->start) {
2357                 ret = policy->governor->start(policy);
2358                 if (ret)
2359                         return ret;
2360         }
2361
2362         if (policy->governor->limits)
2363                 policy->governor->limits(policy);
2364
2365         return 0;
2366 }
2367
2368 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2369 {
2370         if (cpufreq_suspended || !policy->governor)
2371                 return;
2372
2373         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2374
2375         if (policy->governor->stop)
2376                 policy->governor->stop(policy);
2377 }
2378
2379 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2380 {
2381         if (cpufreq_suspended || !policy->governor)
2382                 return;
2383
2384         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2385
2386         if (policy->governor->limits)
2387                 policy->governor->limits(policy);
2388 }
2389
2390 int cpufreq_register_governor(struct cpufreq_governor *governor)
2391 {
2392         int err;
2393
2394         if (!governor)
2395                 return -EINVAL;
2396
2397         if (cpufreq_disabled())
2398                 return -ENODEV;
2399
2400         mutex_lock(&cpufreq_governor_mutex);
2401
2402         err = -EBUSY;
2403         if (!find_governor(governor->name)) {
2404                 err = 0;
2405                 list_add(&governor->governor_list, &cpufreq_governor_list);
2406         }
2407
2408         mutex_unlock(&cpufreq_governor_mutex);
2409         return err;
2410 }
2411 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
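
/*
 * Illustrative sketch (not part of this file): a minimal governor in the
 * style of "performance", implementing only ->limits(). The "example_" names
 * are hypothetical.
 */
static void example_gov_limits_cb(struct cpufreq_policy *policy)
{
        /* keep the CPU at the highest frequency allowed by the policy */
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_gov = {
        .name   = "example",
        .owner  = THIS_MODULE,
        .limits = example_gov_limits_cb,
};

static int __init example_gov_register(void)
{
        return cpufreq_register_governor(&example_gov);
}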
2412
2413 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2414 {
2415         struct cpufreq_policy *policy;
2416         unsigned long flags;
2417
2418         if (!governor)
2419                 return;
2420
2421         if (cpufreq_disabled())
2422                 return;
2423
2424         /* clear last_governor for all inactive policies */
2425         read_lock_irqsave(&cpufreq_driver_lock, flags);
2426         for_each_inactive_policy(policy) {
2427                 if (!strcmp(policy->last_governor, governor->name)) {
2428                         policy->governor = NULL;
2429                         strcpy(policy->last_governor, "\0");
2430                 }
2431         }
2432         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2433
2434         mutex_lock(&cpufreq_governor_mutex);
2435         list_del(&governor->governor_list);
2436         mutex_unlock(&cpufreq_governor_mutex);
2437 }
2438 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2439
2440
2441 /*********************************************************************
2442  *                          POLICY INTERFACE                         *
2443  *********************************************************************/
2444
2445 /**
2446  * cpufreq_get_policy - get the current cpufreq_policy
2447  * @policy: struct cpufreq_policy into which the current cpufreq_policy
2448  *      is written
2449  * @cpu: CPU to find the policy for
2450  *
2451  * Reads the current cpufreq policy.
2452  */
2453 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2454 {
2455         struct cpufreq_policy *cpu_policy;
2456         if (!policy)
2457                 return -EINVAL;
2458
2459         cpu_policy = cpufreq_cpu_get(cpu);
2460         if (!cpu_policy)
2461                 return -EINVAL;
2462
2463         memcpy(policy, cpu_policy, sizeof(*policy));
2464
2465         cpufreq_cpu_put(cpu_policy);
2466         return 0;
2467 }
2468 EXPORT_SYMBOL(cpufreq_get_policy);
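
/*
 * Illustrative sketch (not part of this file): taking a snapshot of a CPU's
 * policy. The copy lives on the caller's stack, so no reference needs to be
 * dropped afterwards. The "example_" name is hypothetical.
 */
static void example_print_limits(unsigned int cpu)
{
        struct cpufreq_policy snapshot;

        if (cpufreq_get_policy(&snapshot, cpu))
                return;

        pr_info("CPU%u: %u - %u kHz (current: %u kHz)\n",
                cpu, snapshot.min, snapshot.max, snapshot.cur);
}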
2469
2470 /**
2471  * cpufreq_set_policy - Modify cpufreq policy parameters.
2472  * @policy: Policy object to modify.
2473  * @new_gov: Policy governor pointer.
2474  * @new_pol: Policy value (for drivers with built-in governors).
2475  *
2476  * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2477  * limits to be set for the policy, update @policy with the verified limits
2478  * values and either invoke the driver's ->setpolicy() callback (if present) or
2479  * carry out a governor update for @policy.  That is, run the current governor's
2480  * ->limits() callback (if @new_gov points to the same object as the one in
2481  * @policy) or replace the governor for @policy with @new_gov.
2482  *
2483  * The cpuinfo part of @policy is not updated by this function.
2484  */
2485 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2486                               struct cpufreq_governor *new_gov,
2487                               unsigned int new_pol)
2488 {
2489         struct cpufreq_policy_data new_data;
2490         struct cpufreq_governor *old_gov;
2491         int ret;
2492
2493         memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2494         new_data.freq_table = policy->freq_table;
2495         new_data.cpu = policy->cpu;
2496         /*
2497          * The PM QoS framework collects all the requests from users and provides
2498          * the final aggregated value here.
2499          */
2500         new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2501         new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2502
2503         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2504                  new_data.cpu, new_data.min, new_data.max);
2505
2506         /*
2507          * Verify that the CPU speed can be set within these limits and make sure
2508          * that min <= max.
2509          */
2510         ret = cpufreq_driver->verify(&new_data);
2511         if (ret)
2512                 return ret;
2513
2514         policy->min = new_data.min;
2515         policy->max = new_data.max;
2516         trace_cpu_frequency_limits(policy);
2517
2518         policy->cached_target_freq = UINT_MAX;
2519
2520         pr_debug("new min and max freqs are %u - %u kHz\n",
2521                  policy->min, policy->max);
2522
2523         if (cpufreq_driver->setpolicy) {
2524                 policy->policy = new_pol;
2525                 pr_debug("setting range\n");
2526                 return cpufreq_driver->setpolicy(policy);
2527         }
2528
2529         if (new_gov == policy->governor) {
2530                 pr_debug("governor limits update\n");
2531                 cpufreq_governor_limits(policy);
2532                 return 0;
2533         }
2534
2535         pr_debug("governor switch\n");
2536
2537         /* save old, working values */
2538         old_gov = policy->governor;
2539         /* end old governor */
2540         if (old_gov) {
2541                 cpufreq_stop_governor(policy);
2542                 cpufreq_exit_governor(policy);
2543         }
2544
2545         /* start new governor */
2546         policy->governor = new_gov;
2547         ret = cpufreq_init_governor(policy);
2548         if (!ret) {
2549                 ret = cpufreq_start_governor(policy);
2550                 if (!ret) {
2551                         pr_debug("governor change\n");
2552                         sched_cpufreq_governor_change(policy, old_gov);
2553                         return 0;
2554                 }
2555                 cpufreq_exit_governor(policy);
2556         }
2557
2558         /* new governor failed, so re-start old one */
2559         pr_debug("starting governor %s failed\n", policy->governor->name);
2560         if (old_gov) {
2561                 policy->governor = old_gov;
2562                 if (cpufreq_init_governor(policy))
2563                         policy->governor = NULL;
2564                 else
2565                         cpufreq_start_governor(policy);
2566         }
2567
2568         return ret;
2569 }
2570
2571 /**
2572  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2573  * @cpu: CPU to re-evaluate the policy for.
2574  *
2575  * Update the current frequency for the cpufreq policy of @cpu and use
2576  * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2577  * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2578  * for the policy in question, among other things.
2579  */
2580 void cpufreq_update_policy(unsigned int cpu)
2581 {
2582         struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2583
2584         if (!policy)
2585                 return;
2586
2587         /*
2588          * BIOS might change freq behind our back
2589          * -> ask driver for current freq and notify governors about a change
2590          */
2591         if (cpufreq_driver->get && has_target() &&
2592             (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2593                 goto unlock;
2594
2595         refresh_frequency_limits(policy);
2596
2597 unlock:
2598         cpufreq_cpu_release(policy);
2599 }
2600 EXPORT_SYMBOL(cpufreq_update_policy);
2601
2602 /**
2603  * cpufreq_update_limits - Update policy limits for a given CPU.
2604  * @cpu: CPU to update the policy limits for.
2605  *
2606  * Invoke the driver's ->update_limits callback if present or call
2607  * cpufreq_update_policy() for @cpu.
2608  */
2609 void cpufreq_update_limits(unsigned int cpu)
2610 {
2611         if (cpufreq_driver->update_limits)
2612                 cpufreq_driver->update_limits(cpu);
2613         else
2614                 cpufreq_update_policy(cpu);
2615 }
2616 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2617
2618 /*********************************************************************
2619  *               BOOST                                               *
2620  *********************************************************************/
2621 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2622 {
2623         int ret;
2624
2625         if (!policy->freq_table)
2626                 return -ENXIO;
2627
2628         ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2629         if (ret) {
2630                 pr_err("%s: Policy frequency update failed\n", __func__);
2631                 return ret;
2632         }
2633
2634         ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2635         if (ret < 0)
2636                 return ret;
2637
2638         return 0;
2639 }
2640
2641 int cpufreq_boost_trigger_state(int state)
2642 {
2643         struct cpufreq_policy *policy;
2644         unsigned long flags;
2645         int ret = 0;
2646
2647         if (cpufreq_driver->boost_enabled == state)
2648                 return 0;
2649
2650         write_lock_irqsave(&cpufreq_driver_lock, flags);
2651         cpufreq_driver->boost_enabled = state;
2652         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2653
2654         get_online_cpus();
2655         for_each_active_policy(policy) {
2656                 ret = cpufreq_driver->set_boost(policy, state);
2657                 if (ret)
2658                         goto err_reset_state;
2659         }
2660         put_online_cpus();
2661
2662         return 0;
2663
2664 err_reset_state:
2665         put_online_cpus();
2666
2667         write_lock_irqsave(&cpufreq_driver_lock, flags);
2668         cpufreq_driver->boost_enabled = !state;
2669         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2670
2671         pr_err("%s: Cannot %s BOOST\n",
2672                __func__, state ? "enable" : "disable");
2673
2674         return ret;
2675 }
2676
2677 static bool cpufreq_boost_supported(void)
2678 {
2679         return cpufreq_driver->set_boost;
2680 }
2681
2682 static int create_boost_sysfs_file(void)
2683 {
2684         int ret;
2685
2686         ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2687         if (ret)
2688                 pr_err("%s: cannot register global BOOST sysfs file\n",
2689                        __func__);
2690
2691         return ret;
2692 }
2693
2694 static void remove_boost_sysfs_file(void)
2695 {
2696         if (cpufreq_boost_supported())
2697                 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2698 }
2699
2700 int cpufreq_enable_boost_support(void)
2701 {
2702         if (!cpufreq_driver)
2703                 return -EINVAL;
2704
2705         if (cpufreq_boost_supported())
2706                 return 0;
2707
2708         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2709
2710         /* This will get removed on driver unregister */
2711         return create_boost_sysfs_file();
2712 }
2713 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
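
/*
 * Illustrative sketch (not part of this file): a driver's ->init() fragment
 * enabling software boost handling when its frequency table contains boost
 * entries. The "example_" name is hypothetical and policy->freq_table is
 * assumed to have been populated already.
 */
static int example_init_boost(struct cpufreq_policy *policy)
{
        if (policy_has_boost_freq(policy))
                return cpufreq_enable_boost_support();

        return 0;
}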
2714
2715 int cpufreq_boost_enabled(void)
2716 {
2717         return cpufreq_driver->boost_enabled;
2718 }
2719 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2720
2721 /*********************************************************************
2722  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2723  *********************************************************************/
2724 static enum cpuhp_state hp_online;
2725
2726 static int cpuhp_cpufreq_online(unsigned int cpu)
2727 {
2728         cpufreq_online(cpu);
2729
2730         return 0;
2731 }
2732
2733 static int cpuhp_cpufreq_offline(unsigned int cpu)
2734 {
2735         cpufreq_offline(cpu);
2736
2737         return 0;
2738 }
2739
2740 /**
2741  * cpufreq_register_driver - register a CPU Frequency driver
2742  * @driver_data: A struct cpufreq_driver containing the values
2743  * submitted by the CPU Frequency driver.
2744  *
2745  * Registers a CPU Frequency driver to this core code. This code
2746  * returns zero on success, -EEXIST when another driver got here first
2747  * (and isn't unregistered in the meantime).
2748  *
2749  */
2750 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2751 {
2752         unsigned long flags;
2753         int ret;
2754
2755         if (cpufreq_disabled())
2756                 return -ENODEV;
2757
2758         /*
2759          * The cpufreq core depends heavily on the availability of device
2760          * structures, so make sure they are available before proceeding further.
2761          */
2762         if (!get_cpu_device(0))
2763                 return -EPROBE_DEFER;
2764
2765         if (!driver_data || !driver_data->verify || !driver_data->init ||
2766             !(driver_data->setpolicy || driver_data->target_index ||
2767                     driver_data->target) ||
2768              (driver_data->setpolicy && (driver_data->target_index ||
2769                     driver_data->target)) ||
2770              (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2771              (!driver_data->online != !driver_data->offline))
2772                 return -EINVAL;
2773
2774         pr_debug("trying to register driver %s\n", driver_data->name);
2775
2776         /* Protect against concurrent CPU online/offline. */
2777         cpus_read_lock();
2778
2779         write_lock_irqsave(&cpufreq_driver_lock, flags);
2780         if (cpufreq_driver) {
2781                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2782                 ret = -EEXIST;
2783                 goto out;
2784         }
2785         cpufreq_driver = driver_data;
2786         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2787
2788         /*
2789          * Mark support for the scheduler's frequency invariance engine for
2790          * drivers that implement target(), target_index() or fast_switch().
2791          */
2792         if (!cpufreq_driver->setpolicy) {
2793                 static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2794                 pr_debug("supports frequency invariance");
2795         }
2796
2797         if (driver_data->setpolicy)
2798                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2799
2800         if (cpufreq_boost_supported()) {
2801                 ret = create_boost_sysfs_file();
2802                 if (ret)
2803                         goto err_null_driver;
2804         }
2805
2806         ret = subsys_interface_register(&cpufreq_interface);
2807         if (ret)
2808                 goto err_boost_unreg;
2809
2810         if (unlikely(list_empty(&cpufreq_policy_list))) {
2811                 /* if all ->init() calls failed, unregister */
2812                 ret = -ENODEV;
2813                 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2814                          driver_data->name);
2815                 goto err_if_unreg;
2816         }
2817
2818         ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2819                                                    "cpufreq:online",
2820                                                    cpuhp_cpufreq_online,
2821                                                    cpuhp_cpufreq_offline);
2822         if (ret < 0)
2823                 goto err_if_unreg;
2824         hp_online = ret;
2825         ret = 0;
2826
2827         pr_debug("driver %s up and running\n", driver_data->name);
2828         goto out;
2829
2830 err_if_unreg:
2831         subsys_interface_unregister(&cpufreq_interface);
2832 err_boost_unreg:
2833         remove_boost_sysfs_file();
2834 err_null_driver:
2835         write_lock_irqsave(&cpufreq_driver_lock, flags);
2836         cpufreq_driver = NULL;
2837         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2838 out:
2839         cpus_read_unlock();
2840         return ret;
2841 }
2842 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
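
/*
 * Illustrative sketch (not part of this file): the smallest table-based
 * driver shape accepted by the validity checks above (->verify, ->init and
 * one of ->setpolicy/->target/->target_index). All "example_" names and the
 * frequencies/latency are hypothetical; a real driver would typically also
 * provide ->get.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 500000 },
        { .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_table_init(struct cpufreq_policy *policy)
{
        policy->freq_table = example_freq_table;
        policy->cpuinfo.transition_latency = 100000;    /* ns, hypothetical */
        return 0;
}

static int example_table_target_index(struct cpufreq_policy *policy,
                                      unsigned int index)
{
        /* hypothetical: program clocks/regulators for freq_table[index] here */
        return 0;
}

static struct cpufreq_driver example_table_driver = {
        .name           = "example",
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = example_table_target_index,
        .init           = example_table_init,
        .attr           = cpufreq_generic_attr,
};

static int __init example_table_driver_register(void)
{
        return cpufreq_register_driver(&example_table_driver);
}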
2843
2844 /*
2845  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2846  *
2847  * Unregister the current CPUFreq driver. Only call this if you have
2848  * the right to do so, i.e. if you have succeeded in initialising before!
2849  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2850  * currently not initialised.
2851  */
2852 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2853 {
2854         unsigned long flags;
2855
2856         if (!cpufreq_driver || (driver != cpufreq_driver))
2857                 return -EINVAL;
2858
2859         pr_debug("unregistering driver %s\n", driver->name);
2860
2861         /* Protect against concurrent cpu hotplug */
2862         cpus_read_lock();
2863         subsys_interface_unregister(&cpufreq_interface);
2864         remove_boost_sysfs_file();
2865         static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2866         cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2867
2868         write_lock_irqsave(&cpufreq_driver_lock, flags);
2869
2870         cpufreq_driver = NULL;
2871
2872         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2873         cpus_read_unlock();
2874
2875         return 0;
2876 }
2877 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2878
2879 static int __init cpufreq_core_init(void)
2880 {
2881         struct cpufreq_governor *gov = cpufreq_default_governor();
2882
2883         if (cpufreq_disabled())
2884                 return -ENODEV;
2885
2886         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2887         BUG_ON(!cpufreq_global_kobject);
2888
2889         if (!strlen(default_governor) && gov)
2890                 strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2891
2892         return 0;
2893 }
2894 module_param(off, int, 0444);
2895 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2896 core_initcall(cpufreq_core_init);