	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENODEV if the OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct opp_device *opp_dev;
+	struct opp_table *opp_table;
+	int ret = 0;
+
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _find_opp_table(cpu_dev);
+	if (IS_ERR(opp_table)) {
+		ret = PTR_ERR(opp_table);
+		goto unlock;
+	}
+
+	cpumask_clear(cpumask);
+
+	if (opp_table->shared_opp) {
+		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+			cpumask_set_cpu(opp_dev->dev->id, cpumask);
+	} else {
+		cpumask_set_cpu(cpu_dev->id, cpumask);
+	}
+
+unlock:
+	mutex_unlock(&opp_table_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
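
For reference, a minimal sketch of how a caller could consume the new helper, e.g. from a cpufreq driver's ->init() callback. The foo_cpufreq_init() driver below is hypothetical and not part of this patch:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev)
		return -ENODEV;

	/* Populate policy->cpus with the CPUs sharing OPPs with policy->cpu */
	ret = dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		/* No OPP table yet: fall back to just this CPU */
		cpumask_set_cpu(policy->cpu, policy->cpus);
	}

	return 0;
}

Since the helper takes opp_table_lock, it must not be called under RCU protection or from atomic context, which is fine for a ->init() callback.
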
void dev_pm_opp_put_regulator(struct device *dev);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const cpumask_var_t cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	return 0;
}
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	return -EINVAL;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)