Merge branches 'pm-em', 'pm-powercap' and 'pm-sleep'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 19 Mar 2024 12:25:49 +0000 (13:25 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 19 Mar 2024 12:25:49 +0000 (13:25 +0100)
Merge additional updates related to the Energy Model, power capping
and system-wide power management for 6.9-rc1:

 - Modify the Energy Model code to bail out and complain if the unit of
   power is not uW to prevent errors due to unit mismatches (Lukasz
   Luba).

 - Make the intel_rapl platform driver use a remove callback returning
   void (Uwe Kleine-König).

 - Fix typo in the suspend and interrupts document (Saravana Kannan).

* pm-em:
  PM: EM: Force device drivers to provide power in uW

* pm-powercap:
  powercap: intel_rapl: Convert to platform remove callback returning void

* pm-sleep:
  Documentation: power: Fix typo in suspend and interrupts doc

Documentation/power/suspend-and-interrupts.rst
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/scmi-cpufreq.c
drivers/firmware/arm_scmi/perf.c
drivers/powercap/intel_rapl_msr.c

index dfbace2f4600c8dbbdaa58a748e6377577685f8c..f588feeecad0fccb7d8f6a169cb93d24a273960a 100644 (file)
@@ -78,7 +78,7 @@ handling the given IRQ as a system wakeup interrupt line and disable_irq_wake()
 turns that logic off.
 
 Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
-in a special way.  Namely, the IRQ remains enabled, by on the first interrupt
+in a special way.  Namely, the IRQ remains enabled, but on the first interrupt
 it will be disabled, marked as pending and "suspended" so that it will be
 re-enabled by resume_device_irqs() during the subsequent system resume.  Also
 the PM core is notified about the event which causes the system suspend in
index 8bd6e5e8f121ce89fdc80af0caa66fd6944ef593..2d83bbc65dd0bd69c8b035790ddc1217ddde9fb4 100644 (file)
@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
        if (!priv)
                return -ENOMEM;
 
-       if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
                return -ENOMEM;
 
        cpumask_set_cpu(cpu, priv->cpus);
index f6f8d7f450e7f5fbab2a7b11a67864c78f621f72..66e10a19d76abc3476282c69060c0cac4a815084 100644 (file)
@@ -653,14 +653,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
        if (policy->boost_enabled == enable)
                return count;
 
+       policy->boost_enabled = enable;
+
        cpus_read_lock();
        ret = cpufreq_driver->set_boost(policy, enable);
        cpus_read_unlock();
 
-       if (ret)
+       if (ret) {
+               policy->boost_enabled = !policy->boost_enabled;
                return ret;
-
-       policy->boost_enabled = enable;
+       }
 
        return count;
 }
@@ -1428,6 +1430,9 @@ static int cpufreq_online(unsigned int cpu)
                        goto out_free_policy;
                }
 
+               /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+               policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
                /*
                 * The initialization has succeeded and the policy is online.
                 * If there is a problem with its frequency table, take it
@@ -2769,11 +2774,12 @@ int cpufreq_boost_trigger_state(int state)
 
        cpus_read_lock();
        for_each_active_policy(policy) {
+               policy->boost_enabled = state;
                ret = cpufreq_driver->set_boost(policy, state);
-               if (ret)
+               if (ret) {
+                       policy->boost_enabled = !policy->boost_enabled;
                        goto err_reset_state;
-
-               policy->boost_enabled = state;
+               }
        }
        cpus_read_unlock();
 
index c4d4643b6ca6507ca85b134ecadcd0f1e0d82a5b..c17dc51a5a022d4b85558f69f3c21ec27ec2ba00 100644 (file)
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
        cpufreq_for_each_valid_entry(pos, table) {
                freq = pos->frequency;
 
-               if (!cpufreq_boost_enabled()
+               if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
                    && (pos->flags & CPUFREQ_BOOST_FREQ))
                        continue;
 
index 0b483bd0d3ca6c5ca1668c6d87d431888ca26df1..3b4f6bfb2f4cf3ee2184948d2b991e3d5f196153 100644 (file)
@@ -30,6 +30,7 @@ struct scmi_data {
 
 static struct scmi_protocol_handle *ph;
 static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
 
 static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 {
@@ -167,6 +168,12 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
        return rate_limit;
 }
 
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+       NULL,
+};
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret, nr_opp, domain;
@@ -276,6 +283,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        policy->transition_delay_us =
                scmi_get_rate_limit(domain, policy->fast_switch_possible);
 
+       if (policy_has_boost_freq(policy)) {
+               ret = cpufreq_enable_boost_support();
+               if (ret) {
+                       dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+                       goto out_free_opp;
+               } else {
+                       scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+                       scmi_cpufreq_driver.boost_enabled = true;
+               }
+       }
+
        return 0;
 
 out_free_opp:
@@ -334,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
                  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                  CPUFREQ_IS_COOLING_DEV,
        .verify = cpufreq_generic_frequency_table_verify,
-       .attr   = cpufreq_generic_attr,
+       .attr   = scmi_cpufreq_hw_attr,
        .target_index   = scmi_cpufreq_set_target,
        .fast_switch    = scmi_cpufreq_fast_switch,
        .get    = scmi_cpufreq_get_rate,
index 8e832d1ad8251eed9e4007a566eb6a70d8af548d..345fff167b52f5e1b87ced8cd854e96702e0734d 100644 (file)
@@ -871,6 +871,9 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
                else
                        freq = dom->opp[idx].indicative_freq * dom->mult_factor;
 
+               /* All OPPs above the sustained frequency are treated as turbo */
+               data.turbo = freq > dom->sustained_freq_khz * 1000;
+
                data.level = dom->opp[idx].perf;
                data.freq = freq;
 
index b4b6930cacb0b11b016c771b2301a4c8f416f707..35cb152fa9aa7aa703a6c9138966e1aaf31364ac 100644 (file)
@@ -197,11 +197,10 @@ out:
        return ret;
 }
 
-static int rapl_msr_remove(struct platform_device *pdev)
+static void rapl_msr_remove(struct platform_device *pdev)
 {
        cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
        powercap_unregister_control_type(rapl_msr_priv->control_type);
-       return 0;
 }
 
 static const struct platform_device_id rapl_msr_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
 
 static struct platform_driver intel_rapl_msr_driver = {
        .probe = rapl_msr_probe,
-       .remove = rapl_msr_remove,
+       .remove_new = rapl_msr_remove,
        .id_table = rapl_msr_ids,
        .driver = {
                .name = "intel_rapl_msr",