Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 14 Dec 2020 19:26:17 +0000 (20:26 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 14 Dec 2020 19:26:17 +0000 (20:26 +0100)
Pull OPP (Operating Performance Points) updates for 5.11-rc1 from
Viresh Kumar:

"This contains the following updates:

 - Allow empty (node-less) OPP tables in DT for passing just the
   dependency related information (Nicola Mazzucato).

 - Fix a potential lockdep issue in OPP core and other OPP core cleanups
   (Viresh Kumar).

 - Don't abuse dev_pm_opp_get_opp_table() to create an OPP table, fix
   cpufreq-dt driver for the same (Viresh Kumar).

 - dev_pm_opp_put_regulators() accepts a NULL argument now, updates to
   all the users as well (Viresh Kumar)."

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  opp: of: Allow empty opp-table with opp-shared
  dt-bindings: opp: Allow empty OPP tables
  media: venus: dev_pm_opp_put_*() accepts NULL argument
  drm/panfrost: dev_pm_opp_put_*() accepts NULL argument
  drm/lima: dev_pm_opp_put_*() accepts NULL argument
  PM / devfreq: exynos: dev_pm_opp_put_*() accepts NULL argument
  cpufreq: qcom-cpufreq-nvmem: dev_pm_opp_put_*() accepts NULL argument
  cpufreq: dt: dev_pm_opp_put_regulators() accepts NULL argument
  opp: Allow dev_pm_opp_put_*() APIs to accept NULL opp_table
  opp: Don't create an OPP table from dev_pm_opp_get_opp_table()
  cpufreq: dt: Don't (ab)use dev_pm_opp_get_opp_table() to create OPP table
  opp: Reduce the size of critical section in _opp_kref_release()
  opp: Don't return opp_dev from _find_opp_dev()
  opp: Allocate the OPP table outside of opp_table_lock
  opp: Always add entries in dev_list with opp_table->lock held

12 files changed:
Documentation/devicetree/bindings/opp/opp.txt
drivers/base/power/domain.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/qcom-cpufreq-nvmem.c
drivers/devfreq/exynos-bus.c
drivers/gpu/drm/lima/lima_devfreq.c
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/media/platform/qcom/venus/pm_helpers.c
drivers/opp/core.c
drivers/opp/of.c
drivers/opp/opp.h
include/linux/pm_opp.h

index 9847dfeeffcba0bb4423c0418e4007760be00f96..08b3da4736cf29afa0927ff9706cc23cab0eb5ed 100644 (file)
@@ -65,7 +65,9 @@ Required properties:
 
 - OPP nodes: One or more OPP nodes describing voltage-current-frequency
   combinations. Their name isn't significant but their phandle can be used to
-  reference an OPP.
+  reference an OPP. These are mandatory except for the case where the OPP table
+  is present only to indicate dependency between devices using the opp-shared
+  property.
 
 Optional properties:
 - opp-shared: Indicates that device nodes using this OPP Table Node's phandle
@@ -568,3 +570,53 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>:
                };
        };
 };
+
+Example 7: Single cluster Quad-core ARM cortex A53, OPP points from firmware,
+distinct clock controls but two sets of clock/voltage/current lines.
+
+/ {
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a53";
+                       reg = <0x0 0x100>;
+                       next-level-cache = <&A53_L2>;
+                       clocks = <&dvfs_controller 0>;
+                       operating-points-v2 = <&cpu_opp0_table>;
+               };
+               cpu@1 {
+                       compatible = "arm,cortex-a53";
+                       reg = <0x0 0x101>;
+                       next-level-cache = <&A53_L2>;
+                       clocks = <&dvfs_controller 1>;
+                       operating-points-v2 = <&cpu_opp0_table>;
+               };
+               cpu@2 {
+                       compatible = "arm,cortex-a53";
+                       reg = <0x0 0x102>;
+                       next-level-cache = <&A53_L2>;
+                       clocks = <&dvfs_controller 2>;
+                       operating-points-v2 = <&cpu_opp1_table>;
+               };
+               cpu@3 {
+                       compatible = "arm,cortex-a53";
+                       reg = <0x0 0x103>;
+                       next-level-cache = <&A53_L2>;
+                       clocks = <&dvfs_controller 3>;
+                       operating-points-v2 = <&cpu_opp1_table>;
+               };
+
+       };
+
+       cpu_opp0_table: opp0_table {
+               compatible = "operating-points-v2";
+               opp-shared;
+       };
+
+       cpu_opp1_table: opp1_table {
+               compatible = "operating-points-v2";
+               opp-shared;
+       };
+};
index 743268996336db7dedc1f1bba6393ded3e6fd26b..92b750b865d5f6fe10f21d6a0699bf304122d932 100644 (file)
@@ -2249,7 +2249,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
                         * Save table for faster processing while setting
                         * performance state.
                         */
-                       genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+                       genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
                        WARN_ON(IS_ERR(genpd->opp_table));
                }
 
index e363ae04aac62e49f9c1c376452f25927b207263..ad4234518ef64c72f8c4c6491ff9eedd2ee8ef3a 100644 (file)
@@ -30,7 +30,7 @@ struct private_data {
        cpumask_var_t cpus;
        struct device *cpu_dev;
        struct opp_table *opp_table;
-       struct opp_table *reg_opp_table;
+       struct cpufreq_frequency_table *freq_table;
        bool have_static_opps;
 };
 
@@ -102,7 +102,6 @@ node_put:
 
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
-       struct cpufreq_frequency_table *freq_table;
        struct private_data *priv;
        struct device *cpu_dev;
        struct clk *cpu_clk;
@@ -114,9 +113,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                pr_err("failed to find data for cpu%d\n", policy->cpu);
                return -ENODEV;
        }
-
        cpu_dev = priv->cpu_dev;
-       cpumask_copy(policy->cpus, priv->cpus);
 
        cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
@@ -125,67 +122,32 @@ static int cpufreq_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       /*
-        * Initialize OPP tables for all policy->cpus. They will be shared by
-        * all CPUs which have marked their CPUs shared with OPP bindings.
-        *
-        * For platforms not using operating-points-v2 bindings, we do this
-        * before updating policy->cpus. Otherwise, we will end up creating
-        * duplicate OPPs for policy->cpus.
-        *
-        * OPPs might be populated at runtime, don't check for error here
-        */
-       if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
-               priv->have_static_opps = true;
-
-       /*
-        * But we need OPP table to function so if it is not there let's
-        * give platform code chance to provide it for us.
-        */
-       ret = dev_pm_opp_get_opp_count(cpu_dev);
-       if (ret <= 0) {
-               dev_err(cpu_dev, "OPP table can't be empty\n");
-               ret = -ENODEV;
-               goto out_free_opp;
-       }
-
-       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
-       if (ret) {
-               dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-               goto out_free_opp;
-       }
+       transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+       if (!transition_latency)
+               transition_latency = CPUFREQ_ETERNAL;
 
+       cpumask_copy(policy->cpus, priv->cpus);
        policy->driver_data = priv;
        policy->clk = cpu_clk;
-       policy->freq_table = freq_table;
-
+       policy->freq_table = priv->freq_table;
        policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
+       policy->cpuinfo.transition_latency = transition_latency;
+       policy->dvfs_possible_from_any_cpu = true;
 
        /* Support turbo/boost mode */
        if (policy_has_boost_freq(policy)) {
                /* This gets disabled by core on driver unregister */
                ret = cpufreq_enable_boost_support();
                if (ret)
-                       goto out_free_cpufreq_table;
+                       goto out_clk_put;
                cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
        }
 
-       transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
-       if (!transition_latency)
-               transition_latency = CPUFREQ_ETERNAL;
-
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->dvfs_possible_from_any_cpu = true;
-
        dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
        return 0;
 
-out_free_cpufreq_table:
-       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_free_opp:
-       if (priv->have_static_opps)
-               dev_pm_opp_of_cpumask_remove_table(policy->cpus);
+out_clk_put:
        clk_put(cpu_clk);
 
        return ret;
@@ -208,11 +170,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
 
 static int cpufreq_exit(struct cpufreq_policy *policy)
 {
-       struct private_data *priv = policy->driver_data;
-
-       dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       if (priv->have_static_opps)
-               dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
        clk_put(policy->clk);
        return 0;
 }
@@ -236,6 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
 {
        struct private_data *priv;
        struct device *cpu_dev;
+       bool fallback = false;
        const char *reg_name;
        int ret;
 
@@ -254,68 +212,86 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
        if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
                return -ENOMEM;
 
+       cpumask_set_cpu(cpu, priv->cpus);
        priv->cpu_dev = cpu_dev;
 
-       /* Try to get OPP table early to ensure resources are available */
-       priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
-       if (IS_ERR(priv->opp_table)) {
-               ret = PTR_ERR(priv->opp_table);
-               if (ret != -EPROBE_DEFER)
-                       dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
-               goto free_cpumask;
-       }
-
        /*
         * OPP layer will be taking care of regulators now, but it needs to know
         * the name of the regulator first.
         */
        reg_name = find_supply_name(cpu_dev);
        if (reg_name) {
-               priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
-                                                               &reg_name, 1);
-               if (IS_ERR(priv->reg_opp_table)) {
-                       ret = PTR_ERR(priv->reg_opp_table);
+               priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
+                                                           1);
+               if (IS_ERR(priv->opp_table)) {
+                       ret = PTR_ERR(priv->opp_table);
                        if (ret != -EPROBE_DEFER)
                                dev_err(cpu_dev, "failed to set regulators: %d\n",
                                        ret);
-                       goto put_table;
+                       goto free_cpumask;
                }
        }
 
-       /* Find OPP sharing information so we can fill pri->cpus here */
        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
        if (ret) {
                if (ret != -ENOENT)
-                       goto put_reg;
+                       goto out;
 
                /*
                 * operating-points-v2 not supported, fallback to all CPUs share
                 * OPP for backward compatibility if the platform hasn't set
                 * sharing CPUs.
                 */
-               if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
-                       cpumask_setall(priv->cpus);
-
-                       /*
-                        * OPP tables are initialized only for cpu, do it for
-                        * others as well.
-                        */
-                       ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
-                       if (ret)
-                               dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
-                                       __func__, ret);
-               }
+               if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
+                       fallback = true;
+       }
+
+       /*
+        * Initialize OPP tables for all priv->cpus. They will be shared by
+        * all CPUs which have marked their CPUs shared with OPP bindings.
+        *
+        * For platforms not using operating-points-v2 bindings, we do this
+        * before updating priv->cpus. Otherwise, we will end up creating
+        * duplicate OPPs for the CPUs.
+        *
+        * OPPs might be populated at runtime, don't check for error here.
+        */
+       if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
+               priv->have_static_opps = true;
+
+       /*
+        * The OPP table must be initialized, statically or dynamically, by this
+        * point.
+        */
+       ret = dev_pm_opp_get_opp_count(cpu_dev);
+       if (ret <= 0) {
+               dev_err(cpu_dev, "OPP table can't be empty\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       if (fallback) {
+               cpumask_setall(priv->cpus);
+               ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+               if (ret)
+                       dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+                               __func__, ret);
+       }
+
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
+       if (ret) {
+               dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+               goto out;
        }
 
        list_add(&priv->node, &priv_list);
        return 0;
 
-put_reg:
-       if (priv->reg_opp_table)
-               dev_pm_opp_put_regulators(priv->reg_opp_table);
-put_table:
-       dev_pm_opp_put_opp_table(priv->opp_table);
+out:
+       if (priv->have_static_opps)
+               dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+       dev_pm_opp_put_regulators(priv->opp_table);
 free_cpumask:
        free_cpumask_var(priv->cpus);
        return ret;
@@ -326,9 +302,10 @@ static void dt_cpufreq_release(void)
        struct private_data *priv, *tmp;
 
        list_for_each_entry_safe(priv, tmp, &priv_list, node) {
-               if (priv->reg_opp_table)
-                       dev_pm_opp_put_regulators(priv->reg_opp_table);
-               dev_pm_opp_put_opp_table(priv->opp_table);
+               dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
+               if (priv->have_static_opps)
+                       dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+               dev_pm_opp_put_regulators(priv->opp_table);
                free_cpumask_var(priv->cpus);
                list_del(&priv->node);
        }
index d06b37822c3dff1add1d505474efd9ecb8422f95..747d602f221ea8e62706964c65ce3dd95a09ede0 100644 (file)
@@ -397,19 +397,19 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
 
 free_genpd_opp:
        for_each_possible_cpu(cpu) {
-               if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
+               if (IS_ERR(drv->genpd_opp_tables[cpu]))
                        break;
                dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
        }
        kfree(drv->genpd_opp_tables);
 free_opp:
        for_each_possible_cpu(cpu) {
-               if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu]))
+               if (IS_ERR(drv->names_opp_tables[cpu]))
                        break;
                dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
        }
        for_each_possible_cpu(cpu) {
-               if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu]))
+               if (IS_ERR(drv->hw_opp_tables[cpu]))
                        break;
                dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
        }
@@ -430,12 +430,9 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
        platform_device_unregister(cpufreq_dt_pdev);
 
        for_each_possible_cpu(cpu) {
-               if (drv->names_opp_tables[cpu])
-                       dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
-               if (drv->hw_opp_tables[cpu])
-                       dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
-               if (drv->genpd_opp_tables[cpu])
-                       dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
+               dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
+               dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
+               dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
        }
 
        kfree(drv->names_opp_tables);
index 1e684a448c9efe898956683113d4f4a1af93bfb3..143fd58ec3dcb893829b68367cf5963476353346 100644 (file)
@@ -158,10 +158,8 @@ static void exynos_bus_exit(struct device *dev)
 
        dev_pm_opp_of_remove_table(dev);
        clk_disable_unprepare(bus->clk);
-       if (bus->opp_table) {
-               dev_pm_opp_put_regulators(bus->opp_table);
-               bus->opp_table = NULL;
-       }
+       dev_pm_opp_put_regulators(bus->opp_table);
+       bus->opp_table = NULL;
 }
 
 static void exynos_bus_passive_exit(struct device *dev)
@@ -444,10 +442,8 @@ err:
        dev_pm_opp_of_remove_table(dev);
        clk_disable_unprepare(bus->clk);
 err_reg:
-       if (!passive) {
-               dev_pm_opp_put_regulators(bus->opp_table);
-               bus->opp_table = NULL;
-       }
+       dev_pm_opp_put_regulators(bus->opp_table);
+       bus->opp_table = NULL;
 
        return ret;
 }
index bbe02817721bcfe14ac9dc337e5adbf1de0ce608..e7b7b8dfd7928e919780a066d57f2ff316cefa81 100644 (file)
@@ -110,15 +110,10 @@ void lima_devfreq_fini(struct lima_device *ldev)
                devfreq->opp_of_table_added = false;
        }
 
-       if (devfreq->regulators_opp_table) {
-               dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
-               devfreq->regulators_opp_table = NULL;
-       }
-
-       if (devfreq->clkname_opp_table) {
-               dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
-               devfreq->clkname_opp_table = NULL;
-       }
+       dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+       dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+       devfreq->regulators_opp_table = NULL;
+       devfreq->clkname_opp_table = NULL;
 }
 
 int lima_devfreq_init(struct lima_device *ldev)
index 8ab025d0035f37403e4cbb1b8c4f1c130aefb8ff..97b5abc7c18857313971eb93ba9bd3850c09ccfe 100644 (file)
@@ -170,10 +170,8 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
                pfdevfreq->opp_of_table_added = false;
        }
 
-       if (pfdevfreq->regulators_opp_table) {
-               dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
-               pfdevfreq->regulators_opp_table = NULL;
-       }
+       dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
+       pfdevfreq->regulators_opp_table = NULL;
 }
 
 void panfrost_devfreq_resume(struct panfrost_device *pfdev)
index a9538c2cc3c9dcee80b963c83f6c2578e1f9050a..3a2484184eb45c4e3c107058c6cb953393e16a5f 100644 (file)
@@ -898,8 +898,7 @@ static void core_put_v4(struct device *dev)
 
        if (core->has_opp_table)
                dev_pm_opp_of_remove_table(dev);
-       if (core->opp_table)
-               dev_pm_opp_put_clkname(core->opp_table);
+       dev_pm_opp_put_clkname(core->opp_table);
 
 }
 
index 0e0a5269dc82f8314e936bcc71d457e51d24fb48..4268eb35991523a539e968c321a9693d5aa70809 100644 (file)
 LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
+/* Flag indicating that opp_tables list is being updated at the moment */
+static bool opp_tables_busy;
 
-static struct opp_device *_find_opp_dev(const struct device *dev,
-                                       struct opp_table *opp_table)
+static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
 {
        struct opp_device *opp_dev;
+       bool found = false;
 
+       mutex_lock(&opp_table->lock);
        list_for_each_entry(opp_dev, &opp_table->dev_list, node)
-               if (opp_dev->dev == dev)
-                       return opp_dev;
+               if (opp_dev->dev == dev) {
+                       found = true;
+                       break;
+               }
 
-       return NULL;
+       mutex_unlock(&opp_table->lock);
+       return found;
 }
 
 static struct opp_table *_find_opp_table_unlocked(struct device *dev)
 {
        struct opp_table *opp_table;
-       bool found;
 
        list_for_each_entry(opp_table, &opp_tables, node) {
-               mutex_lock(&opp_table->lock);
-               found = !!_find_opp_dev(dev, opp_table);
-               mutex_unlock(&opp_table->lock);
-
-               if (found) {
+               if (_find_opp_dev(dev, opp_table)) {
                        _get_opp_table_kref(opp_table);
-
                        return opp_table;
                }
        }
@@ -1036,8 +1036,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
        kfree(opp_dev);
 }
 
-static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
-                                               struct opp_table *opp_table)
+struct opp_device *_add_opp_dev(const struct device *dev,
+                               struct opp_table *opp_table)
 {
        struct opp_device *opp_dev;
 
@@ -1048,7 +1048,9 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
        /* Initialize opp-dev */
        opp_dev->dev = dev;
 
+       mutex_lock(&opp_table->lock);
        list_add(&opp_dev->node, &opp_table->dev_list);
+       mutex_unlock(&opp_table->lock);
 
        /* Create debugfs entries for the opp_table */
        opp_debug_register(opp_dev, opp_table);
@@ -1056,18 +1058,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
        return opp_dev;
 }
 
-struct opp_device *_add_opp_dev(const struct device *dev,
-                               struct opp_table *opp_table)
-{
-       struct opp_device *opp_dev;
-
-       mutex_lock(&opp_table->lock);
-       opp_dev = _add_opp_dev_unlocked(dev, opp_table);
-       mutex_unlock(&opp_table->lock);
-
-       return opp_dev;
-}
-
 static struct opp_table *_allocate_opp_table(struct device *dev, int index)
 {
        struct opp_table *opp_table;
@@ -1121,8 +1111,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        INIT_LIST_HEAD(&opp_table->opp_list);
        kref_init(&opp_table->kref);
 
-       /* Secure the device table modification */
-       list_add(&opp_table->node, &opp_tables);
        return opp_table;
 
 err:
@@ -1135,27 +1123,64 @@ void _get_opp_table_kref(struct opp_table *opp_table)
        kref_get(&opp_table->kref);
 }
 
-static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
+/*
+ * We need to make sure that the OPP table for a device doesn't get added twice,
+ * if this routine gets called in parallel with the same device pointer.
+ *
+ * The simplest way to enforce that is to perform everything (find existing
+ * table and if not found, create a new one) under the opp_table_lock, so only
+ * one creator gets access to the same. But that expands the critical section
+ * under the lock and may end up causing circular dependencies with frameworks
+ * like debugfs, interconnect or clock framework as they may be direct or
+ * indirect users of OPP core.
+ *
+ * And for that reason we have to go for a bit tricky implementation here, which
+ * uses the opp_tables_busy flag to indicate if another creator is in the middle
+ * of adding an OPP table and others should wait for it to finish.
+ */
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
 {
        struct opp_table *opp_table;
 
-       /* Hold our table modification lock here */
+again:
        mutex_lock(&opp_table_lock);
 
        opp_table = _find_opp_table_unlocked(dev);
        if (!IS_ERR(opp_table))
                goto unlock;
 
+       /*
+        * The opp_tables list or an OPP table's dev_list is getting updated by
+        * another user, wait for it to finish.
+        */
+       if (unlikely(opp_tables_busy)) {
+               mutex_unlock(&opp_table_lock);
+               cpu_relax();
+               goto again;
+       }
+
+       opp_tables_busy = true;
        opp_table = _managed_opp(dev, index);
+
+       /* Drop the lock to reduce the size of critical section */
+       mutex_unlock(&opp_table_lock);
+
        if (opp_table) {
-               if (!_add_opp_dev_unlocked(dev, opp_table)) {
+               if (!_add_opp_dev(dev, opp_table)) {
                        dev_pm_opp_put_opp_table(opp_table);
                        opp_table = ERR_PTR(-ENOMEM);
                }
-               goto unlock;
+
+               mutex_lock(&opp_table_lock);
+       } else {
+               opp_table = _allocate_opp_table(dev, index);
+
+               mutex_lock(&opp_table_lock);
+               if (!IS_ERR(opp_table))
+                       list_add(&opp_table->node, &opp_tables);
        }
 
-       opp_table = _allocate_opp_table(dev, index);
+       opp_tables_busy = false;
 
 unlock:
        mutex_unlock(&opp_table_lock);
@@ -1163,17 +1188,16 @@ unlock:
        return opp_table;
 }
 
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+struct opp_table *_add_opp_table(struct device *dev)
 {
-       return _opp_get_opp_table(dev, 0);
+       return _add_opp_table_indexed(dev, 0);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
 
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
-                                                  int index)
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
 {
-       return _opp_get_opp_table(dev, index);
+       return _find_opp_table(dev);
 }
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
 
 static void _opp_table_kref_release(struct kref *kref)
 {
@@ -1227,9 +1251,14 @@ void _opp_free(struct dev_pm_opp *opp)
        kfree(opp);
 }
 
-static void _opp_kref_release(struct dev_pm_opp *opp,
-                             struct opp_table *opp_table)
+static void _opp_kref_release(struct kref *kref)
 {
+       struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+       struct opp_table *opp_table = opp->opp_table;
+
+       list_del(&opp->node);
+       mutex_unlock(&opp_table->lock);
+
        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
@@ -1237,27 +1266,9 @@ static void _opp_kref_release(struct dev_pm_opp *opp,
        blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
        _of_opp_free_required_opps(opp_table, opp);
        opp_debug_remove_one(opp);
-       list_del(&opp->node);
        kfree(opp);
 }
 
-static void _opp_kref_release_unlocked(struct kref *kref)
-{
-       struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-       struct opp_table *opp_table = opp->opp_table;
-
-       _opp_kref_release(opp, opp_table);
-}
-
-static void _opp_kref_release_locked(struct kref *kref)
-{
-       struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-       struct opp_table *opp_table = opp->opp_table;
-
-       _opp_kref_release(opp, opp_table);
-       mutex_unlock(&opp_table->lock);
-}
-
 void dev_pm_opp_get(struct dev_pm_opp *opp)
 {
        kref_get(&opp->kref);
@@ -1265,16 +1276,10 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
 
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-       kref_put_mutex(&opp->kref, _opp_kref_release_locked,
-                      &opp->opp_table->lock);
+       kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
-static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
-{
-       kref_put(&opp->kref, _opp_kref_release_unlocked);
-}
-
 /**
  * dev_pm_opp_remove()  - Remove an OPP from OPP table
  * @dev:       device for which we do this operation
@@ -1318,30 +1323,49 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
+                                       bool dynamic)
+{
+       struct dev_pm_opp *opp = NULL, *temp;
+
+       mutex_lock(&opp_table->lock);
+       list_for_each_entry(temp, &opp_table->opp_list, node) {
+               if (dynamic == temp->dynamic) {
+                       opp = temp;
+                       break;
+               }
+       }
+
+       mutex_unlock(&opp_table->lock);
+       return opp;
+}
+
 bool _opp_remove_all_static(struct opp_table *opp_table)
 {
-       struct dev_pm_opp *opp, *tmp;
-       bool ret = true;
+       struct dev_pm_opp *opp;
 
        mutex_lock(&opp_table->lock);
 
        if (!opp_table->parsed_static_opps) {
-               ret = false;
-               goto unlock;
+               mutex_unlock(&opp_table->lock);
+               return false;
        }
 
-       if (--opp_table->parsed_static_opps)
-               goto unlock;
-
-       list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-               if (!opp->dynamic)
-                       dev_pm_opp_put_unlocked(opp);
+       if (--opp_table->parsed_static_opps) {
+               mutex_unlock(&opp_table->lock);
+               return true;
        }
 
-unlock:
        mutex_unlock(&opp_table->lock);
 
-       return ret;
+       /*
+        * Can't remove the OPP from under the lock, debugfs removal needs to
+        * happen lock less to avoid circular dependency issues.
+        */
+       while ((opp = _opp_get_next(opp_table, false)))
+               dev_pm_opp_put(opp);
+
+       return true;
 }
 
 /**
@@ -1353,21 +1377,21 @@ unlock:
 void dev_pm_opp_remove_all_dynamic(struct device *dev)
 {
        struct opp_table *opp_table;
-       struct dev_pm_opp *opp, *temp;
+       struct dev_pm_opp *opp;
        int count = 0;
 
        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table))
                return;
 
-       mutex_lock(&opp_table->lock);
-       list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
-               if (opp->dynamic) {
-                       dev_pm_opp_put_unlocked(opp);
-                       count++;
-               }
+       /*
+        * Can't remove the OPP from under the lock, debugfs removal needs to
+        * happen lock less to avoid circular dependency issues.
+        */
+       while ((opp = _opp_get_next(opp_table, true))) {
+               dev_pm_opp_put(opp);
+               count++;
        }
-       mutex_unlock(&opp_table->lock);
 
        /* Drop the references taken by dev_pm_opp_add() */
        while (count--)
@@ -1602,7 +1626,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
 {
        struct opp_table *opp_table;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -1636,6 +1660,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  */
 void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
 {
+       if (unlikely(!opp_table))
+               return;
+
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1661,7 +1688,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
        struct opp_table *opp_table;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -1692,6 +1719,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  */
 void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
 {
+       if (unlikely(!opp_table))
+               return;
+
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1754,7 +1784,7 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
        struct regulator *reg;
        int ret, i;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -1820,6 +1850,9 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 {
        int i;
 
+       if (unlikely(!opp_table))
+               return;
+
        if (!opp_table->regulators)
                goto put_opp_table;
 
@@ -1862,7 +1895,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
        struct opp_table *opp_table;
        int ret;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -1902,6 +1935,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
  */
 void dev_pm_opp_put_clkname(struct opp_table *opp_table)
 {
+       if (unlikely(!opp_table))
+               return;
+
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1930,7 +1966,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
        if (!set_opp)
                return ERR_PTR(-EINVAL);
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -1957,6 +1993,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
  */
 void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
 {
+       if (unlikely(!opp_table))
+               return;
+
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -2014,7 +2053,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
        int index = 0, ret = -EINVAL;
        const char **name = names;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return opp_table;
 
@@ -2085,6 +2124,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
  */
 void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
 {
+       if (unlikely(!opp_table))
+               return;
+
        /*
         * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
         * used in parallel.
@@ -2179,7 +2221,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
        struct opp_table *opp_table;
        int ret;
 
-       opp_table = dev_pm_opp_get_opp_table(dev);
+       opp_table = _add_opp_table(dev);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);
 
index 9faeb83e4b326217e538d570f29e7e333d5888d5..d41088578aabc21b2c87b85eafef1fd7292a446c 100644 (file)
@@ -112,8 +112,6 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
        struct opp_table *opp_table;
        struct device_node *opp_table_np;
 
-       lockdep_assert_held(&opp_table_lock);
-
        opp_table_np = of_get_parent(opp_np);
        if (!opp_table_np)
                goto err;
@@ -121,12 +119,15 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
        /* It is safe to put the node now as all we need now is its address */
        of_node_put(opp_table_np);
 
+       mutex_lock(&opp_table_lock);
        list_for_each_entry(opp_table, &opp_tables, node) {
                if (opp_table_np == opp_table->np) {
                        _get_opp_table_kref(opp_table);
+                       mutex_unlock(&opp_table_lock);
                        return opp_table;
                }
        }
+       mutex_unlock(&opp_table_lock);
 
 err:
        return ERR_PTR(-ENODEV);
@@ -169,7 +170,8 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
        /* Traversing the first OPP node is all we need */
        np = of_get_next_available_child(opp_np, NULL);
        if (!np) {
-               dev_err(dev, "Empty OPP table\n");
+               dev_warn(dev, "Empty OPP table\n");
+
                return;
        }
 
@@ -377,7 +379,9 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
        struct icc_path **paths;
 
        ret = _bandwidth_supported(dev, opp_table);
-       if (ret <= 0)
+       if (ret == -EINVAL)
+               return 0; /* Empty OPP table is a valid corner-case, let's not fail */
+       else if (ret <= 0)
                return ret;
 
        ret = 0;
@@ -974,7 +978,7 @@ int dev_pm_opp_of_add_table(struct device *dev)
        struct opp_table *opp_table;
        int ret;
 
-       opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+       opp_table = _add_opp_table_indexed(dev, 0);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);
 
@@ -1029,7 +1033,7 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
                        index = 0;
        }
 
-       opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+       opp_table = _add_opp_table_indexed(dev, index);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);
 
index ebd930e0b3cad3aa4d170ac3f0e8a3a9997ec8dd..4ced7ffa8158edafb17301c09b91bde731f53e45 100644 (file)
@@ -224,6 +224,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o
 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
 struct opp_table *_add_opp_table(struct device *dev);
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
 void _put_opp_list_kref(struct opp_table *opp_table);
 
 #ifdef CONFIG_OF
index dbb484524f820d88679d68ca634c199e885a1279..1435c054016a6e78809df22cd752f99b8f816457 100644 (file)
@@ -90,7 +90,6 @@ struct dev_pm_set_opp_data {
 #if defined(CONFIG_PM_OPP)
 
 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
 void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
 
 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);