gov->attr_group_gov_pol : gov->attr_group_gov_sys;
}
-void dbs_check_cpu(struct cpufreq_policy *policy, int cpu)
+void dbs_check_cpu(struct cpufreq_policy *policy)
{
+ int cpu = policy->cpu;
struct dbs_governor *gov = dbs_governor_of(policy);
- struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int sampling_rate;
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
+ struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
u64 cur_nice;
unsigned long cur_nice_jiffies;
+/*
+ * gov_cancel_work - Stop the governor's deferred work safely.
+ * @policy_dbs: Per-policy governor data.
+ *
+ * First block further queuing of work items (work_count increment), then
+ * detach the update_util hook and flush any irq_work / work item already
+ * in flight, so nothing can be re-queued after cancel_work_sync().
+ */
static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
{
/* Tell dbs_update_util_handler() to skip queuing up work items. */
- atomic_inc(&policy_dbs->skip_work);
+ atomic_inc(&policy_dbs->work_count);
/*
* If dbs_update_util_handler() is already running, it may not notice
- * the incremented skip_work, so wait for it to complete to prevent its
+ * the incremented work_count, so wait for it to complete to prevent its
* work item from being queued up after the cancel_work_sync() below.
*/
gov_clear_update_util(policy_dbs->policy);
irq_work_sync(&policy_dbs->irq_work);
cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->skip_work, 0);
+ atomic_set(&policy_dbs->work_count, 0);
}
static void dbs_work_handler(struct work_struct *work)
* up using a stale sample delay value.
*/
smp_mb__before_atomic();
- atomic_dec(&policy_dbs->skip_work);
+ atomic_dec(&policy_dbs->work_count);
}
static void dbs_irq_work(struct irq_work *irq_work)
* - The governor is being stopped.
* - It is too early (too little time from the previous sample).
*/
- if (atomic_inc_return(&policy_dbs->skip_work) == 1) {
+ if (atomic_inc_return(&policy_dbs->work_count) == 1) {
u64 delta_ns;
delta_ns = time - policy_dbs->last_sample_time;
return;
}
}
- atomic_dec(&policy_dbs->skip_work);
+ atomic_dec(&policy_dbs->work_count);
}
static void set_sampling_rate(struct dbs_data *dbs_data,
}
}
+/*
+ * alloc_policy_dbs_info - Allocate per-policy governor data.
+ * @policy: cpufreq policy the data belongs to.
+ * @gov: governor in use.
+ *
+ * Return type changes from an int error code to a pointer: callers now get
+ * the new policy_dbs_info on success, or NULL if kzalloc() fails.  Every
+ * related (online + offline) CPU is pointed at the shared policy data and
+ * has its update_util callback installed here, instead of in START.
+ */
-static int alloc_policy_dbs_info(struct cpufreq_policy *policy,
- struct dbs_governor *gov)
+static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
+ struct dbs_governor *gov)
{
struct policy_dbs_info *policy_dbs;
int j;
/* Allocate memory for the common information for policy->cpus */
policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
if (!policy_dbs)
- return -ENOMEM;
-
- /* Set policy_dbs for all CPUs, online+offline */
- for_each_cpu(j, policy->related_cpus)
- gov->get_cpu_cdbs(j)->policy_dbs = policy_dbs;
+ return NULL;
mutex_init(&policy_dbs->timer_mutex);
- atomic_set(&policy_dbs->skip_work, 0);
+ atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
INIT_WORK(&policy_dbs->work, dbs_work_handler);
+
+ /* Set policy_dbs for all CPUs, online+offline */
+ for_each_cpu(j, policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
+
+ j_cdbs->policy_dbs = policy_dbs;
+ j_cdbs->update_util.func = dbs_update_util_handler;
+ }
+ return policy_dbs;
}
static void free_policy_dbs_info(struct cpufreq_policy *policy,
mutex_destroy(&policy_dbs->timer_mutex);
- for_each_cpu(j, policy->cpus)
- gov->get_cpu_cdbs(j)->policy_dbs = NULL;
+ for_each_cpu(j, policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
+ j_cdbs->policy_dbs = NULL;
+ j_cdbs->update_util.func = NULL;
+ }
kfree(policy_dbs);
}
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data = gov->gdbs_data;
+ struct policy_dbs_info *policy_dbs;
unsigned int latency;
int ret;
if (policy->governor_data)
return -EBUSY;
- if (dbs_data) {
- if (WARN_ON(have_governor_per_policy()))
- return -EINVAL;
-
- ret = alloc_policy_dbs_info(policy, gov);
- if (ret)
- return ret;
+ policy_dbs = alloc_policy_dbs_info(policy, gov);
+ if (!policy_dbs)
+ return -ENOMEM;
+ if (dbs_data) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto free_policy_dbs_info;
+ }
dbs_data->usage_count++;
- policy->governor_data = dbs_data;
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
return 0;
}
dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
- if (!dbs_data)
- return -ENOMEM;
-
- ret = alloc_policy_dbs_info(policy, gov);
- if (ret)
- goto free_dbs_data;
+ if (!dbs_data) {
+ ret = -ENOMEM;
+ goto free_policy_dbs_info;
+ }
dbs_data->usage_count = 1;
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
- policy->governor_data = dbs_data;
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
ret = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr(gov));
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data, !policy->governor->initialized);
+ kfree(dbs_data);
+
free_policy_dbs_info:
free_policy_dbs_info(policy, gov);
-free_dbs_data:
- kfree(dbs_data);
return ret;
}
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
- struct dbs_data *dbs_data = policy->governor_data;
- struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
/* State should be equivalent to INIT */
- if (!cdbs->policy_dbs || cdbs->policy_dbs->policy)
+ if (policy_dbs->policy)
return -EBUSY;
if (!--dbs_data->usage_count) {
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
- struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
int io_busy = 0;
if (!policy->cur)
return -EINVAL;
/* State should be equivalent to INIT */
- if (!policy_dbs || policy_dbs->policy)
+ if (policy_dbs->policy)
return -EBUSY;
if (gov->governor == GOV_CONSERVATIVE) {
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-
- j_cdbs->update_util.func = dbs_update_util_handler;
}
policy_dbs->policy = policy;
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
- struct dbs_governor *gov = dbs_governor_of(policy);
- struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
- struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
/* State should be equivalent to START */
- if (!policy_dbs || !policy_dbs->policy)
+ if (!policy_dbs->policy)
return -EBUSY;
gov_cancel_work(policy_dbs);
+/*
+ * cpufreq_governor_limits - Handle policy min/max limit updates.
+ * @policy: cpufreq policy whose limits changed.
+ *
+ * Under timer_mutex, clamp the current frequency into the new
+ * [policy->min, policy->max] range and re-evaluate the load via
+ * dbs_check_cpu().  Returns -EBUSY unless the governor is started.
+ * The rewrite uses policy->governor_data directly; policy_dbs->policy
+ * and policy are the same object once START has run, so dereferencing
+ * through cdbs is no longer needed.
+ */
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
- struct dbs_governor *gov = dbs_governor_of(policy);
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
/* State should be equivalent to START */
- if (!cdbs->policy_dbs || !cdbs->policy_dbs->policy)
+ if (!policy_dbs->policy)
return -EBUSY;
- mutex_lock(&cdbs->policy_dbs->timer_mutex);
- if (policy->max < cdbs->policy_dbs->policy->cur)
- __cpufreq_driver_target(cdbs->policy_dbs->policy, policy->max,
- CPUFREQ_RELATION_H);
- else if (policy->min > cdbs->policy_dbs->policy->cur)
- __cpufreq_driver_target(cdbs->policy_dbs->policy, policy->min,
- CPUFREQ_RELATION_L);
- dbs_check_cpu(policy, cpu);
- mutex_unlock(&cdbs->policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->timer_mutex);
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ dbs_check_cpu(policy);
+ mutex_unlock(&policy_dbs->timer_mutex);
return 0;
}