Merge back earlier 'pm-cpufreq' material.
commit 3b4aff0472
5 changed files with 86 additions and 99 deletions
@@ -303,6 +303,14 @@ source "drivers/cpuidle/Kconfig"
 
 endmenu
 
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"

@@ -200,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
 endmenu
 
 menu "ARM CPU frequency scaling drivers"
-	depends on ARM
+	depends on ARM || ARM64
 source "drivers/cpufreq/Kconfig.arm"
 endmenu
 

@@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
 			struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -2012,22 +2012,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
-	int ret = 0, failed = 1;
+	struct cpufreq_governor *old_gov;
+	int ret;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
 		new_policy->min, new_policy->max);
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min) {
-		ret = -EINVAL;
-		goto error_out;
-	}
+	if (new_policy->min > policy->max || new_policy->max < policy->min)
+		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* adjust if necessary - all reasons */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2042,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* notification of the new policy */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2058,58 +2057,48 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		ret = cpufreq_driver->setpolicy(new_policy);
-	} else {
-		if (new_policy->governor != policy->governor) {
-			/* save old, working values */
-			struct cpufreq_governor *old_gov = policy->governor;
-
-			pr_debug("governor switch\n");
-
-			/* end old governor */
-			if (policy->governor) {
-				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-				up_write(&policy->rwsem);
-				__cpufreq_governor(policy,
-						CPUFREQ_GOV_POLICY_EXIT);
-				down_write(&policy->rwsem);
-			}
-
-			/* start new governor */
-			policy->governor = new_policy->governor;
-			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-					failed = 0;
-				} else {
-					up_write(&policy->rwsem);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_EXIT);
-					down_write(&policy->rwsem);
-				}
-			}
-
-			if (failed) {
-				/* new governor failed, so re-start old one */
-				pr_debug("starting governor %s failed\n",
-					 policy->governor->name);
-				if (old_gov) {
-					policy->governor = old_gov;
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_INIT);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_START);
-				}
-				ret = -EINVAL;
-				goto error_out;
-			}
-			/* might be a policy change, too, so fall through */
-		}
-		pr_debug("governor: change or update limits\n");
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+		return cpufreq_driver->setpolicy(new_policy);
 	}
 
-error_out:
-	return ret;
+	if (new_policy->governor == policy->governor)
+		goto out;
+
+	pr_debug("governor switch\n");
+
+	/* save old, working values */
+	old_gov = policy->governor;
+	/* end old governor */
+	if (old_gov) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy,CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
+
+	/* start new governor */
+	policy->governor = new_policy->governor;
+	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+			goto out;
+
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
+
+	/* new governor failed, so re-start old one */
+	pr_debug("starting governor %s failed\n", policy->governor->name);
+	if (old_gov) {
+		policy->governor = old_gov;
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	}
+
+	return -EINVAL;
+
+out:
+	pr_debug("governor: change or update limits\n");
+	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
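For readers skimming the hunk above: the rewrite flattens the old nested else/if logic into a straight-line sequence, stopping and exiting the old governor, initializing and starting the new one, and rolling back to the old governor before returning -EINVAL if the switch fails; the common exit then pushes the limits via CPUFREQ_GOV_LIMITS. A minimal standalone C sketch of that switch-with-rollback shape, purely illustrative; the governor type and the gov_start()/gov_stop() helpers below are invented, not the kernel API:

#include <stdio.h>

/* Invented stand-ins for governor start/stop; 0 means success. */
struct governor { const char *name; int broken; };

static int gov_start(struct governor *g) { return g->broken ? -1 : 0; }
static void gov_stop(struct governor *g) { (void)g; }

/* Mirrors the control flow of the rewritten cpufreq_set_policy(). */
static int switch_governor(struct governor **cur, struct governor *new_gov)
{
	struct governor *old = *cur;

	if (new_gov == old)
		goto out;			/* nothing to switch */

	if (old)
		gov_stop(old);			/* end old governor */

	*cur = new_gov;				/* start new governor */
	if (!gov_start(new_gov))
		goto out;

	/* new governor failed, so re-start old one */
	fprintf(stderr, "starting governor %s failed\n", new_gov->name);
	*cur = old;
	if (old)
		gov_start(old);
	return -1;

out:
	printf("governor %s active, updating limits\n", (*cur)->name);
	return 0;
}

int main(void)
{
	struct governor ondemand = { "ondemand", 0 }, bad = { "bad", 1 };
	struct governor *cur = &ondemand;

	switch_governor(&cur, &bad);		/* fails, rolls back to ondemand */
	switch_governor(&cur, &ondemand);	/* no-op, just updates limits */
	return 0;
}
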
@@ -2145,6 +2134,11 @@ int cpufreq_update_policy(unsigned int cpu)
 	 */
 	if (cpufreq_driver->get) {
 		new_policy.cur = cpufreq_driver->get(cpu);
+		if (WARN_ON(!new_policy.cur)) {
+			ret = -EIO;
+			goto no_policy;
+		}
+
 		if (!policy->cur) {
 			pr_debug("Driver did not initialize current freq");
 			policy->cur = new_policy.cur;
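The added guard relies on the kernel idiom that WARN_ON(cond) evaluates to cond, so a single expression both logs a warning (with a backtrace in the kernel) and steers the error path. A rough userspace stand-in of that idiom; warn_on() here is an invented helper, not the kernel macro:

#include <errno.h>
#include <stdio.h>

/* Invented stand-in: report the condition and hand it back to the caller. */
static int warn_on(int cond, const char *what)
{
	if (cond)
		fprintf(stderr, "WARNING: %s\n", what);
	return cond;
}

/* Shaped like the cpufreq_update_policy() check added above. */
static int update_policy(unsigned int cur_khz)
{
	if (warn_on(cur_khz == 0, "driver reported 0 kHz"))
		return -EIO;	/* the kernel code does ret = -EIO; goto no_policy; */

	printf("current frequency: %u kHz\n", cur_khz);
	return 0;
}

int main(void)
{
	update_policy(0);		/* takes the warning path */
	update_policy(1200000);
	return 0;
}
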
@@ -2181,7 +2175,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 		__cpufreq_add_dev(dev, NULL, frozen);
-		cpufreq_update_policy(cpu);
 		break;
 
 	case CPU_DOWN_PREPARE:

@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
 	cpufreq_cpu_put(policy);
 }
 
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
-		struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
 	unsigned int i, j, count = 0, ret = 0;
 	struct cpufreq_stats *stat;
-	struct cpufreq_policy *current_policy;
 	unsigned int alloc_size;
 	unsigned int cpu = policy->cpu;
+	struct cpufreq_frequency_table *table;
+
+	table = cpufreq_frequency_get_table(cpu);
+	if (unlikely(!table))
+		return 0;
+
 	if (per_cpu(cpufreq_stats_table, cpu))
 		return -EBUSY;
 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
 	if ((stat) == NULL)
 		return -ENOMEM;
 
-	current_policy = cpufreq_cpu_get(cpu);
-	if (current_policy == NULL) {
-		ret = -EINVAL;
-		goto error_get_fail;
-	}
-
-	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+	ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
 	if (ret)
 		goto error_out;
 
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
 	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
 	if (!stat->time_in_state) {
 		ret = -ENOMEM;
-		goto error_out;
+		goto error_alloc;
 	}
 	stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
 	stat->last_time = get_jiffies_64();
 	stat->last_index = freq_table_get_index(stat, policy->cur);
 	spin_unlock(&cpufreq_stats_lock);
-	cpufreq_cpu_put(current_policy);
 	return 0;
+error_alloc:
+	sysfs_remove_group(&policy->kobj, &stats_attr_group);
 error_out:
-	cpufreq_cpu_put(current_policy);
-error_get_fail:
 	kfree(stat);
 	per_cpu(cpufreq_stats_table, cpu) = NULL;
 	return ret;
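The new error_alloc label above keeps teardown in the reverse order of setup: the sysfs group is removed only on paths where it was actually created, while earlier failures skip straight to freeing the stats structure. A small standalone sketch of that staged goto-cleanup idiom; the helper names are invented and this is not the kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the two setup steps in the hunk above. */
static int  create_sysfs_group(void) { puts("sysfs group created"); return 0; }
static void remove_sysfs_group(void) { puts("sysfs group removed"); }

static int create_table(size_t alloc_size, int force_alloc_failure)
{
	void *time_in_state = NULL;
	int ret;

	ret = create_sysfs_group();
	if (ret)
		goto error_out;			/* nothing to unwind yet */

	if (!force_alloc_failure)
		time_in_state = calloc(1, alloc_size);
	if (!time_in_state) {
		ret = -ENOMEM;
		goto error_alloc;		/* undo the group, then fall through */
	}

	puts("table created");
	free(time_in_state);
	return 0;

error_alloc:
	remove_sysfs_group();
error_out:
	return ret;
}

int main(void)
{
	create_table(64, 1);	/* exercises error_alloc -> error_out */
	return create_table(64, 0) ? 1 : 0;
}
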
@@ -256,7 +253,6 @@ error_get_fail:
 static void cpufreq_stats_create_table(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
-	struct cpufreq_frequency_table *table;
 
 	/*
 	 * "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
 	if (likely(!policy))
 		return;
 
-	table = cpufreq_frequency_get_table(policy->cpu);
-	if (likely(table))
-		__cpufreq_stats_create_table(policy, table);
+	__cpufreq_stats_create_table(policy);
 
 	cpufreq_cpu_put(policy);
 }
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 {
 	int ret = 0;
 	struct cpufreq_policy *policy = data;
-	struct cpufreq_frequency_table *table;
-	unsigned int cpu = policy->cpu;
 
 	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
 		cpufreq_stats_update_policy_cpu(policy);
 		return 0;
 	}
 
-	table = cpufreq_frequency_get_table(cpu);
-	if (!table)
-		return 0;
-
 	if (val == CPUFREQ_CREATE_POLICY)
-		ret = __cpufreq_stats_create_table(policy, table);
+		ret = __cpufreq_stats_create_table(policy);
 	else if (val == CPUFREQ_REMOVE_POLICY)
 		__cpufreq_stats_free_table(policy);
 

@@ -99,8 +99,7 @@ struct cpudata {
 	u64 prev_aperf;
 	u64 prev_mperf;
 	unsigned long long prev_tsc;
-	int sample_ptr;
-	struct sample samples[SAMPLE_COUNT];
+	struct sample sample;
 };
 
 static struct cpudata **all_cpu_data;
@@ -154,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 	pid->setpoint = setpoint;
 	pid->deadband = deadband;
 	pid->integral = int_tofp(integral);
-	pid->last_err = setpoint - busy;
+	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
 }
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
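The pid_reset() change above matters because the PID's later error terms are computed in fixed point, so last_err must start out in the same scale or the first derivative term is skewed. A standalone illustration of the effect; FRAC_BITS = 8 is assumed here for the sake of the demo and int_tofp() is redefined locally:

#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((long long)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

int main(void)
{
	int setpoint = 97;

	/* Old code: plain integer difference, 97 - 80 = 17 in integer units. */
	long long last_err_old = setpoint - 80;

	/* New code: the same difference, but scaled by 2^FRAC_BITS. */
	long long last_err_new = int_tofp(setpoint) - int_tofp(80);

	/* A later error, computed in fixed point the way the driver does. */
	long long next_err = int_tofp(setpoint) - int_tofp(70);

	/* The derivative term is next_err - last_err; only the new form keeps
	 * both operands in the same scale. */
	printf("old d_err = %lld (mixes scales)\n", next_err - last_err_old);
	printf("new d_err = %lld fixed point = %lld integer\n",
	       next_err - last_err_new, fp_toint(next_err - last_err_new));
	return 0;
}
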
@@ -586,15 +585,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	mperf = mperf >> FRAC_BITS;
 	tsc = tsc >> FRAC_BITS;
 
-	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-	cpu->samples[cpu->sample_ptr].aperf = aperf;
-	cpu->samples[cpu->sample_ptr].mperf = mperf;
-	cpu->samples[cpu->sample_ptr].tsc = tsc;
-	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
+	cpu->sample.aperf = aperf;
+	cpu->sample.mperf = mperf;
+	cpu->sample.tsc = tsc;
+	cpu->sample.aperf -= cpu->prev_aperf;
+	cpu->sample.mperf -= cpu->prev_mperf;
+	cpu->sample.tsc -= cpu->prev_tsc;
 
-	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+	intel_pstate_calc_busy(cpu, &cpu->sample);
 
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
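With the SAMPLE_COUNT ring gone, the hunk above keeps exactly one in-flight sample and turns each raw APERF/MPERF/TSC reading into a delta against the previous reading. A minimal userspace sketch of that bookkeeping; the struct and function names are invented, and the busy ratio printed at the end is the usual APERF/MPERF interpretation, stated here as an assumption rather than the driver's exact formula:

#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t aperf, mperf; };

struct cpu_state {
	uint64_t prev_aperf, prev_mperf;
	struct sample sample;		/* single sample, as after this commit */
};

/* Store the new reading as deltas against the previous reading. */
static void take_sample(struct cpu_state *c, uint64_t aperf, uint64_t mperf)
{
	c->sample.aperf = aperf - c->prev_aperf;
	c->sample.mperf = mperf - c->prev_mperf;
	c->prev_aperf = aperf;
	c->prev_mperf = mperf;
}

int main(void)
{
	struct cpu_state c = { .prev_aperf = 1000, .prev_mperf = 2000 };

	take_sample(&c, 1800, 3000);	/* counters advanced by 800 and 1000 */
	printf("busy ratio = %.2f\n",
	       (double)c.sample.aperf / (double)c.sample.mperf);	/* 0.80 */
	return 0;
}
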
@@ -614,7 +612,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t core_busy, max_pstate, current_pstate;
 
-	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+	core_busy = cpu->sample.core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
@@ -648,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 
 	intel_pstate_sample(cpu);
 
-	sample = &cpu->samples[cpu->sample_ptr];
+	sample = &cpu->sample;
 
 	intel_pstate_adjust_busy_pstate(cpu);
 
@@ -729,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 	cpu = all_cpu_data[cpu_num];
 	if (!cpu)
 		return 0;
-	sample = &cpu->samples[cpu->sample_ptr];
+	sample = &cpu->sample;
 	return sample->freq;
 }