- OPP nodes: One or more OPP nodes describing voltage-current-frequency
combinations. Their name isn't significant but their phandle can be used to
- reference an OPP.
+ reference an OPP. These are mandatory except when the OPP table is present
+ only to indicate dependency between devices using the opp-shared property
+ (see Example 7 below).
Optional properties:
- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
};
};
};
+
+Example 7: Single-cluster quad-core ARM Cortex-A53, OPP points from firmware,
+distinct clock controls but two sets of clock/voltage/current lines.
+
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x100>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 0>;
+ operating-points-v2 = <&cpu_opp0_table>;
+ };
+ cpu@1 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x101>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 1>;
+ operating-points-v2 = <&cpu_opp0_table>;
+ };
+ cpu@2 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x102>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 2>;
+ operating-points-v2 = <&cpu_opp1_table>;
+ };
+ cpu@3 {
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x103>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 3>;
+ operating-points-v2 = <&cpu_opp1_table>;
+ };
+ };
+
+ cpu_opp0_table: opp0_table {
+ compatible = "operating-points-v2";
+ opp-shared;
+ };
+
+ cpu_opp1_table: opp1_table {
+ compatible = "operating-points-v2";
+ opp-shared;
+ };
+};
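
The two tables in Example 7 deliberately contain no OPP nodes: they exist only
so the OS can tell that cpu0/cpu1 (and likewise cpu2/cpu3) share one set of
clock/voltage/current lines. The OPPs themselves are expected to be populated
at runtime, e.g. from firmware. A hypothetical sketch of such platform code,
with illustrative names and error handling elided (this is not part of the
binding):

        /* dev_pm_opp_add() takes the frequency in Hz and the voltage in uV */
        for (i = 0; i < fw_opp_count; i++)
                dev_pm_opp_add(cpu_dev, fw_freq_hz[i], fw_volt_uv[i]);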
* Save table for faster processing while setting
* performance state.
*/
- genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
WARN_ON(IS_ERR(genpd->opp_table));
}
cpumask_var_t cpus;
struct device *cpu_dev;
struct opp_table *opp_table;
- struct opp_table *reg_opp_table;
+ struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
};
static int cpufreq_init(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *freq_table;
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
pr_err("failed to find data for cpu%d\n", policy->cpu);
return -ENODEV;
}
-
cpu_dev = priv->cpu_dev;
- cpumask_copy(policy->cpus, priv->cpus);
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
return ret;
}
- /*
- * Initialize OPP tables for all policy->cpus. They will be shared by
- * all CPUs which have marked their CPUs shared with OPP bindings.
- *
- * For platforms not using operating-points-v2 bindings, we do this
- * before updating policy->cpus. Otherwise, we will end up creating
- * duplicate OPPs for policy->cpus.
- *
- * OPPs might be populated at runtime, don't check for error here
- */
- if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
- priv->have_static_opps = true;
-
- /*
- * But we need OPP table to function so if it is not there let's
- * give platform code chance to provide it for us.
- */
- ret = dev_pm_opp_get_opp_count(cpu_dev);
- if (ret <= 0) {
- dev_err(cpu_dev, "OPP table can't be empty\n");
- ret = -ENODEV;
- goto out_free_opp;
- }
-
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
- if (ret) {
- dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_opp;
- }
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
+ cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
policy->clk = cpu_clk;
- policy->freq_table = freq_table;
-
+ policy->freq_table = priv->freq_table;
policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
/* Support turbo/boost mode */
if (policy_has_boost_freq(policy)) {
/* This gets disabled by core on driver unregister */
ret = cpufreq_enable_boost_support();
if (ret)
- goto out_free_cpufreq_table;
+ goto out_clk_put;
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
- if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->dvfs_possible_from_any_cpu = true;
-
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
-out_free_cpufreq_table:
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_free_opp:
- if (priv->have_static_opps)
- dev_pm_opp_of_cpumask_remove_table(policy->cpus);
+out_clk_put:
clk_put(cpu_clk);
return ret;
static int cpufreq_exit(struct cpufreq_policy *policy)
{
- struct private_data *priv = policy->driver_data;
-
- dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- if (priv->have_static_opps)
- dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
clk_put(policy->clk);
return 0;
}
{
struct private_data *priv;
struct device *cpu_dev;
+ bool fallback = false;
const char *reg_name;
int ret;
if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
+ cpumask_set_cpu(cpu, priv->cpus);
priv->cpu_dev = cpu_dev;
- /* Try to get OPP table early to ensure resources are available */
- priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
- if (IS_ERR(priv->opp_table)) {
- ret = PTR_ERR(priv->opp_table);
- if (ret != -EPROBE_DEFER)
- dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
- goto free_cpumask;
- }
-
/*
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
reg_name = find_supply_name(cpu_dev);
if (reg_name) {
- priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
- &reg_name, 1);
- if (IS_ERR(priv->reg_opp_table)) {
- ret = PTR_ERR(priv->reg_opp_table);
+ priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
+ 1);
+ if (IS_ERR(priv->opp_table)) {
+ ret = PTR_ERR(priv->opp_table);
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
- goto put_table;
+ goto free_cpumask;
}
}
- /* Find OPP sharing information so we can fill pri->cpus here */
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
if (ret) {
if (ret != -ENOENT)
- goto put_reg;
+ goto out;
/*
* operating-points-v2 not supported, fallback to all CPUs share
* OPP for backward compatibility if the platform hasn't set
* sharing CPUs.
*/
- if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
- cpumask_setall(priv->cpus);
-
- /*
- * OPP tables are initialized only for cpu, do it for
- * others as well.
- */
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
- if (ret)
- dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
- __func__, ret);
- }
+ if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
+ fallback = true;
+ }
+
+ /*
+ * Initialize OPP tables for all priv->cpus. The tables will be shared by
+ * all CPUs that are marked as sharing OPPs via the OPP bindings.
+ *
+ * For platforms not using operating-points-v2 bindings, we do this
+ * before updating priv->cpus. Otherwise, we will end up creating
+ * duplicate OPPs for the CPUs.
+ *
+ * OPPs might be populated at runtime, don't check for error here.
+ */
+ if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
+ priv->have_static_opps = true;
+
+ /*
+ * The OPP table must be initialized, statically or dynamically, by this
+ * point.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_err(cpu_dev, "OPP table can't be empty\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (fallback) {
+ cpumask_setall(priv->cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out;
}
list_add(&priv->node, &priv_list);
return 0;
-put_reg:
- if (priv->reg_opp_table)
- dev_pm_opp_put_regulators(priv->reg_opp_table);
-put_table:
- dev_pm_opp_put_opp_table(priv->opp_table);
+out:
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+ dev_pm_opp_put_regulators(priv->opp_table);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
struct private_data *priv, *tmp;
list_for_each_entry_safe(priv, tmp, &priv_list, node) {
- if (priv->reg_opp_table)
- dev_pm_opp_put_regulators(priv->reg_opp_table);
- dev_pm_opp_put_opp_table(priv->opp_table);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+ dev_pm_opp_put_regulators(priv->opp_table);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
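
Taken together, the cpufreq-dt hunks above move all OPP and frequency-table
setup out of ->init() and into the driver's probe-time per-policy helper (its
name isn't visible in these hunks), with ->init() only consuming the cached
results and ->exit() reduced to clk_put(). A rough sketch of the resulting
split, under those assumptions and with error handling elided:

        /* probe time, once per policy (hypothetical helper name) */
        static int early_init_sketch(int cpu, struct private_data *priv)
        {
                struct device *cpu_dev = get_cpu_device(cpu);

                /* regulators first, so the OPP core knows the supply name */
                priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
                /* find CPUs sharing the table (or flag the legacy fallback) */
                dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
                /* add static OPPs and build the cpufreq table exactly once */
                dev_pm_opp_of_cpumask_add_table(priv->cpus);
                return dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
        }

        /* ->init() then merely wires the cached table into the policy */
        policy->freq_table = priv->freq_table;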
free_genpd_opp:
for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
+ if (IS_ERR(drv->genpd_opp_tables[cpu]))
break;
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
kfree(drv->genpd_opp_tables);
free_opp:
for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu]))
+ if (IS_ERR(drv->names_opp_tables[cpu]))
break;
dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
}
for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu]))
+ if (IS_ERR(drv->hw_opp_tables[cpu]))
break;
dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
}
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
- if (drv->names_opp_tables[cpu])
- dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
- if (drv->hw_opp_tables[cpu])
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- if (drv->genpd_opp_tables[cpu])
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
+ dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
+ dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
+ dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
kfree(drv->names_opp_tables);
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- if (bus->opp_table) {
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
- }
+ dev_pm_opp_put_regulators(bus->opp_table);
+ bus->opp_table = NULL;
}
static void exynos_bus_passive_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- if (!passive) {
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
- }
+ dev_pm_opp_put_regulators(bus->opp_table);
+ bus->opp_table = NULL;
return ret;
}
devfreq->opp_of_table_added = false;
}
- if (devfreq->regulators_opp_table) {
- dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
- devfreq->regulators_opp_table = NULL;
- }
-
- if (devfreq->clkname_opp_table) {
- dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
- devfreq->clkname_opp_table = NULL;
- }
+ dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+ dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+ devfreq->regulators_opp_table = NULL;
+ devfreq->clkname_opp_table = NULL;
}
int lima_devfreq_init(struct lima_device *ldev)
pfdevfreq->opp_of_table_added = false;
}
- if (pfdevfreq->regulators_opp_table) {
- dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
- pfdevfreq->regulators_opp_table = NULL;
- }
+ dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
+ pfdevfreq->regulators_opp_table = NULL;
}
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
if (core->has_opp_table)
dev_pm_opp_of_remove_table(dev);
- if (core->opp_table)
- dev_pm_opp_put_clkname(core->opp_table);
+ dev_pm_opp_put_clkname(core->opp_table);
}
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
+/* Flag indicating that opp_tables list is being updated at the moment */
+static bool opp_tables_busy;
-static struct opp_device *_find_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
+static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
+ bool found = false;
+ mutex_lock(&opp_table->lock);
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
- if (opp_dev->dev == dev)
- return opp_dev;
+ if (opp_dev->dev == dev) {
+ found = true;
+ break;
+ }
- return NULL;
+ mutex_unlock(&opp_table->lock);
+ return found;
}
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
struct opp_table *opp_table;
- bool found;
list_for_each_entry(opp_table, &opp_tables, node) {
- mutex_lock(&opp_table->lock);
- found = !!_find_opp_dev(dev, opp_table);
- mutex_unlock(&opp_table->lock);
-
- if (found) {
+ if (_find_opp_dev(dev, opp_table)) {
_get_opp_table_kref(opp_table);
-
return opp_table;
}
}
kfree(opp_dev);
}
-static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
- struct opp_table *opp_table)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
struct opp_device *opp_dev;
/* Initialize opp-dev */
opp_dev->dev = dev;
+ mutex_lock(&opp_table->lock);
list_add(&opp_dev->node, &opp_table->dev_list);
+ mutex_unlock(&opp_table->lock);
/* Create debugfs entries for the opp_table */
opp_debug_register(opp_dev, opp_table);
return opp_dev;
}
-struct opp_device *_add_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
-{
- struct opp_device *opp_dev;
-
- mutex_lock(&opp_table->lock);
- opp_dev = _add_opp_dev_unlocked(dev, opp_table);
- mutex_unlock(&opp_table->lock);
-
- return opp_dev;
-}
-
static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
- /* Secure the device table modification */
- list_add(&opp_table->node, &opp_tables);
return opp_table;
err:
kref_get(&opp_table->kref);
}
-static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
+/*
+ * We need to make sure that the OPP table for a device doesn't get added twice
+ * when this routine is called in parallel for the same device pointer.
+ *
+ * The simplest way to enforce that is to perform everything (find an existing
+ * table and, if not found, create a new one) under the opp_table_lock, so that
+ * only one creator gets access at a time. But that expands the critical
+ * section under the lock and may end up causing circular dependencies with
+ * frameworks like debugfs, interconnect or the clock framework, as they may be
+ * direct or indirect users of the OPP core.
+ *
+ * For that reason we go for a slightly tricky implementation here, which uses
+ * the opp_tables_busy flag to indicate that another creator is in the middle
+ * of adding an OPP table and that others should wait for it to finish.
+ */
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
- /* Hold our table modification lock here */
+again:
mutex_lock(&opp_table_lock);
opp_table = _find_opp_table_unlocked(dev);
if (!IS_ERR(opp_table))
goto unlock;
+ /*
+ * The opp_tables list or an OPP table's dev_list is being updated by
+ * another user; wait for it to finish.
+ */
+ if (unlikely(opp_tables_busy)) {
+ mutex_unlock(&opp_table_lock);
+ cpu_relax();
+ goto again;
+ }
+
+ opp_tables_busy = true;
opp_table = _managed_opp(dev, index);
+
+ /* Drop the lock to reduce the size of critical section */
+ mutex_unlock(&opp_table_lock);
+
if (opp_table) {
- if (!_add_opp_dev_unlocked(dev, opp_table)) {
+ if (!_add_opp_dev(dev, opp_table)) {
dev_pm_opp_put_opp_table(opp_table);
opp_table = ERR_PTR(-ENOMEM);
}
- goto unlock;
+
+ mutex_lock(&opp_table_lock);
+ } else {
+ opp_table = _allocate_opp_table(dev, index);
+
+ mutex_lock(&opp_table_lock);
+ if (!IS_ERR(opp_table))
+ list_add(&opp_table->node, &opp_tables);
}
- opp_table = _allocate_opp_table(dev, index);
+ opp_tables_busy = false;
unlock:
mutex_unlock(&opp_table_lock);
return opp_table;
}
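
The busy-flag protocol is easier to see stripped of the OPP details. Here is a
minimal sketch under hypothetical names (res_lock, res_busy, find_existing(),
create_resource()) — the shape of _add_opp_table_indexed() above, not a
drop-in implementation:

        static DEFINE_MUTEX(res_lock);
        static LIST_HEAD(res_list);
        static bool res_busy;                   /* a creator is mid-flight */

        static struct res *get_or_create(struct device *dev)
        {
                struct res *r;

        again:
                mutex_lock(&res_lock);
                r = find_existing(dev);         /* fast path */
                if (r)
                        goto unlock;

                if (res_busy) {                 /* someone else is creating */
                        mutex_unlock(&res_lock);
                        cpu_relax();
                        goto again;             /* they may create ours too */
                }

                res_busy = true;                /* claim creation rights */
                mutex_unlock(&res_lock);        /* create outside the lock so
                                                 * debugfs/clk/interconnect
                                                 * callees can't deadlock */
                r = create_resource(dev);

                mutex_lock(&res_lock);
                if (r)
                        list_add(&r->node, &res_list);
                res_busy = false;
        unlock:
                mutex_unlock(&res_lock);
                return r;
        }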
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+struct opp_table *_add_opp_table(struct device *dev)
{
- return _opp_get_opp_table(dev, 0);
+ return _add_opp_table_indexed(dev, 0);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
- int index)
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- return _opp_get_opp_table(dev, index);
+ return _find_opp_table(dev);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
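
Note the semantic change to the exported API here: dev_pm_opp_get_opp_table()
no longer creates a table on demand — it only finds an existing one and takes
a reference, while creation now goes through the internal _add_opp_table*()
helpers. Callers therefore end up looking like this sketch:

        /* Succeeds only if an earlier dev_pm_opp_set_*() / OF path already
         * created the table; otherwise an ERR_PTR (e.g. -ENODEV) comes back. */
        opp_table = dev_pm_opp_get_opp_table(dev);
        if (IS_ERR(opp_table))
                return PTR_ERR(opp_table);

        /* ... use the table ... */

        dev_pm_opp_put_opp_table(opp_table);    /* drop the reference */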
static void _opp_table_kref_release(struct kref *kref)
{
kfree(opp);
}
-static void _opp_kref_release(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
+static void _opp_kref_release(struct kref *kref)
{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ list_del(&opp->node);
+ mutex_unlock(&opp_table->lock);
+
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
_of_opp_free_required_opps(opp_table, opp);
opp_debug_remove_one(opp);
- list_del(&opp->node);
kfree(opp);
}
-static void _opp_kref_release_unlocked(struct kref *kref)
-{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
- _opp_kref_release(opp, opp_table);
-}
-
-static void _opp_kref_release_locked(struct kref *kref)
-{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
- _opp_kref_release(opp, opp_table);
- mutex_unlock(&opp_table->lock);
-}
-
void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
- kref_put_mutex(&opp->kref, _opp_kref_release_locked,
- &opp->opp_table->lock);
+ kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
-static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
-{
- kref_put(&opp->kref, _opp_kref_release_unlocked);
-}
-
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
+ bool dynamic)
+{
+ struct dev_pm_opp *opp = NULL, *temp;
+
+ mutex_lock(&opp_table->lock);
+ list_for_each_entry(temp, &opp_table->opp_list, node) {
+ if (dynamic == temp->dynamic) {
+ opp = temp;
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ return opp;
+}
+
bool _opp_remove_all_static(struct opp_table *opp_table)
{
- struct dev_pm_opp *opp, *tmp;
- bool ret = true;
+ struct dev_pm_opp *opp;
mutex_lock(&opp_table->lock);
if (!opp_table->parsed_static_opps) {
- ret = false;
- goto unlock;
+ mutex_unlock(&opp_table->lock);
+ return false;
}
- if (--opp_table->parsed_static_opps)
- goto unlock;
-
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
- dev_pm_opp_put_unlocked(opp);
+ if (--opp_table->parsed_static_opps) {
+ mutex_unlock(&opp_table->lock);
+ return true;
}
-unlock:
mutex_unlock(&opp_table->lock);
- return ret;
+ /*
+ * Can't remove the OPP while holding the lock; debugfs removal needs
+ * to happen lockless to avoid circular dependency issues.
+ */
+ while ((opp = _opp_get_next(opp_table, false)))
+ dev_pm_opp_put(opp);
+
+ return true;
}
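
Both removal paths now share the same drain idiom: peek at one matching OPP
under the table lock via _opp_get_next(), drop the lock, and only then call
dev_pm_opp_put(), whose release path touches debugfs. In generic form, with
hypothetical item/list names:

        for (;;) {
                struct item *it;

                mutex_lock(&list_lock);         /* only peek under the lock */
                it = list_first_entry_or_null(&items, struct item, node);
                mutex_unlock(&list_lock);

                if (!it)
                        break;
                destroy(it);                    /* may take other locks (e.g.
                                                 * debugfs); re-takes list_lock
                                                 * internally to unlink it */
        }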
/**
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
struct opp_table *opp_table;
- struct dev_pm_opp *opp, *temp;
+ struct dev_pm_opp *opp;
int count = 0;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return;
- mutex_lock(&opp_table->lock);
- list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
- if (opp->dynamic) {
- dev_pm_opp_put_unlocked(opp);
- count++;
- }
+ /*
+ * Can't remove the OPP while holding the lock; debugfs removal needs
+ * to happen lockless to avoid circular dependency issues.
+ */
+ while ((opp = _opp_get_next(opp_table, true))) {
+ dev_pm_opp_put(opp);
+ count++;
}
- mutex_unlock(&opp_table->lock);
/* Drop the references taken by dev_pm_opp_add() */
while (count--)
{
struct opp_table *opp_table;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
*/
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
+ if (unlikely(!opp_table))
+ return;
+
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
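
The same "if (unlikely(!opp_table)) return;" guard is added to every
put/unregister helper below (prop-name, regulators, clkname, set-opp helper,
genpd detach). That is what allows the exynos-bus, lima, panfrost and venus
hunks earlier in the series to drop their conditional checks. A sketch of the
resulting error-path style in a hypothetical driver:

        err:
                /* Safe even if the matching "set" call never ran and these
                 * pointers are still NULL. */
                dev_pm_opp_put_clkname(priv->clkname_opp_table);
                dev_pm_opp_put_regulators(priv->regulators_opp_table);
                return ret;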
{
struct opp_table *opp_table;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
*/
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
+ if (unlikely(!opp_table))
+ return;
+
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
struct regulator *reg;
int ret, i;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
{
int i;
+ if (unlikely(!opp_table))
+ return;
+
if (!opp_table->regulators)
goto put_opp_table;
struct opp_table *opp_table;
int ret;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
*/
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
+ if (unlikely(!opp_table))
+ return;
+
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
if (!set_opp)
return ERR_PTR(-EINVAL);
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
*/
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
+ if (unlikely(!opp_table))
+ return;
+
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
int index = 0, ret = -EINVAL;
const char **name = names;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
*/
void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
{
+ if (unlikely(!opp_table))
+ return;
+
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
struct opp_table *opp_table;
int ret;
- opp_table = dev_pm_opp_get_opp_table(dev);
+ opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
struct opp_table *opp_table;
struct device_node *opp_table_np;
- lockdep_assert_held(&opp_table_lock);
-
opp_table_np = of_get_parent(opp_np);
if (!opp_table_np)
goto err;
/* It is safe to put the node now as all we need now is its address */
of_node_put(opp_table_np);
+ mutex_lock(&opp_table_lock);
list_for_each_entry(opp_table, &opp_tables, node) {
if (opp_table_np == opp_table->np) {
_get_opp_table_kref(opp_table);
+ mutex_unlock(&opp_table_lock);
return opp_table;
}
}
+ mutex_unlock(&opp_table_lock);
err:
return ERR_PTR(-ENODEV);
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
if (!np) {
- dev_err(dev, "Empty OPP table\n");
+ dev_warn(dev, "Empty OPP table\n");
+
return;
}
struct icc_path **paths;
ret = _bandwidth_supported(dev, opp_table);
- if (ret <= 0)
+ if (ret == -EINVAL)
+ return 0; /* Empty OPP table is a valid corner-case, let's not fail */
+ else if (ret <= 0)
return ret;
ret = 0;
struct opp_table *opp_table;
int ret;
- opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+ opp_table = _add_opp_table_indexed(dev, 0);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
index = 0;
}
- opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+ opp_table = _add_opp_table_indexed(dev, index);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table(struct device *dev);
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
void _put_opp_list_kref(struct opp_table *opp_table);
#ifdef CONFIG_OF
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);