/* Per-CPU ACPI processor performance (Px) data, indexed by CPU number. */
struct processor_pminfo processor_pminfo[NR_CPUS];
/* Per-CPU cpufreq policy objects handed to the cpufreq core. */
struct cpufreq_policy xen_px_policy[NR_CPUS];
/*
 * PSD ("P-state dependency") domain bookkeeping built by cpufreq_dom_init():
 *   cpufreq_dom_pt   - heap-allocated per-domain cpumask array (may be NULL)
 *   cpufreq_dom_mask - bitmap of domain numbers present on this system
 *   cpufreq_dom_max  - highest domain number seen, plus one (array length)
 */
static cpumask_t *cpufreq_dom_pt;
static cpumask_t cpufreq_dom_mask;
static unsigned int cpufreq_dom_max;
+
enum {
UNDEFINED_CAPABLE = 0,
SYSTEM_INTEL_MSR_CAPABLE,
struct processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int max_freq;
- unsigned int resume;
unsigned int cpu_feature;
};
next_perf_state = data->freq_table[next_state].index;
if (perf->state == next_perf_state) {
- if (unlikely(data->resume)) {
- printk("xen_pminfo: @acpi_cpufreq_target, "
- "Called after resume, resetting to P%d\n",
+ if (unlikely(policy->resume)) {
+ printk(KERN_INFO "Called after resume, resetting to P%d\n",
next_perf_state);
- data->resume = 0;
+ policy->resume = 0;
}
- else
+ else {
+ printk(KERN_INFO "Already at target state (P%d)\n",
+ next_perf_state);
return 0;
+ }
}
switch (data->cpu_feature) {
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
*/
- data->resume = 1;
+ policy->resume = 1;
return result;
.init = acpi_cpufreq_cpu_init,
};
-int acpi_cpufreq_init(void)
+void cpufreq_dom_exit(void)
{
- unsigned int i, ret = 0;
- unsigned int dom, max_dom = 0;
- cpumask_t *pt, dom_mask;
+ cpufreq_dom_max = 0;
+ cpus_clear(cpufreq_dom_mask);
+ if (cpufreq_dom_pt)
+ xfree(cpufreq_dom_pt);
+}
+
+int cpufreq_dom_init(void)
+{
+ unsigned int i;
- cpus_clear(dom_mask);
+ cpufreq_dom_max = 0;
+ cpus_clear(cpufreq_dom_mask);
for_each_online_cpu(i) {
- cpu_set(processor_pminfo[i].perf.domain_info.domain, dom_mask);
- if (max_dom < processor_pminfo[i].perf.domain_info.domain)
- max_dom = processor_pminfo[i].perf.domain_info.domain;
+ cpu_set(processor_pminfo[i].perf.domain_info.domain, cpufreq_dom_mask);
+ if (cpufreq_dom_max < processor_pminfo[i].perf.domain_info.domain)
+ cpufreq_dom_max = processor_pminfo[i].perf.domain_info.domain;
}
- max_dom++;
+ cpufreq_dom_max++;
- pt = xmalloc_array(cpumask_t, max_dom);
- if (!pt)
+ cpufreq_dom_pt = xmalloc_array(cpumask_t, cpufreq_dom_max);
+ if (!cpufreq_dom_pt)
return -ENOMEM;
- memset(pt, 0, max_dom * sizeof(cpumask_t));
+ memset(cpufreq_dom_pt, 0, cpufreq_dom_max * sizeof(cpumask_t));
- /* get cpumask of each psd domain */
for_each_online_cpu(i)
- cpu_set(i, pt[processor_pminfo[i].perf.domain_info.domain]);
+ cpu_set(i, cpufreq_dom_pt[processor_pminfo[i].perf.domain_info.domain]);
for_each_online_cpu(i)
- processor_pminfo[i].perf.shared_cpu_map =
- pt[processor_pminfo[i].perf.domain_info.domain];
+ processor_pminfo[i].perf.shared_cpu_map =
+ cpufreq_dom_pt[processor_pminfo[i].perf.domain_info.domain];
- cpufreq_driver = &acpi_cpufreq_driver;
+ return 0;
+}
+
+static int cpufreq_cpu_init(void)
+{
+ int i, ret = 0;
- /* setup cpufreq infrastructure */
for_each_online_cpu(i) {
xen_px_policy[i].cpu = i;
ret = px_statistic_init(i);
if (ret)
- goto out;
+ return ret;
ret = acpi_cpufreq_cpu_init(&xen_px_policy[i]);
if (ret)
- goto out;
+ return ret;
}
+ return ret;
+}
+
+int cpufreq_dom_dbs(unsigned int event)
+{
+ int cpu, dom, ret = 0;
- /* setup ondemand cpufreq */
- for (dom=0; dom<max_dom; dom++) {
- if (!cpu_isset(dom, dom_mask))
+ for (dom=0; dom<cpufreq_dom_max; dom++) {
+ if (!cpu_isset(dom, cpufreq_dom_mask))
continue;
- i = first_cpu(pt[dom]);
- ret = cpufreq_governor_dbs(&xen_px_policy[i], CPUFREQ_GOV_START);
+ cpu = first_cpu(cpufreq_dom_pt[dom]);
+ ret = cpufreq_governor_dbs(&xen_px_policy[cpu], event);
if (ret)
- goto out;
+ return ret;
}
+ return ret;
+}
+
+int acpi_cpufreq_init(void)
+{
+ int ret = 0;
+
+ /* setup cpumask of psd dom and shared cpu map of cpu */
+ ret = cpufreq_dom_init();
+ if (ret)
+ goto err;
+
+ /* setup cpufreq driver */
+ cpufreq_driver = &acpi_cpufreq_driver;
+
+ /* setup cpufreq infrastructure */
+ ret = cpufreq_cpu_init();
+ if (ret)
+ goto err;
+
+ /* setup cpufreq dbs according to dom coordiation */
+ ret = cpufreq_dom_dbs(CPUFREQ_GOV_START);
+ if (ret)
+ goto err;
+
+ return ret;
-out:
- xfree(pt);
-
+err:
+ cpufreq_dom_exit();
return ret;
}
* Px STATISTIC INFO *
*********************************************************************/
+void px_statistic_suspend(void)
+{
+ int cpu;
+ uint64_t now;
+
+ now = NOW();
+
+ for_each_online_cpu(cpu) {
+ struct pm_px *pxpt = &px_statistic_data[cpu];
+ pxpt->u.pt[pxpt->u.cur].residency +=
+ now - pxpt->prev_state_wall;
+ }
+}
+
+void px_statistic_resume(void)
+{
+ int cpu;
+ uint64_t now;
+
+ now = NOW();
+
+ for_each_online_cpu(cpu) {
+ struct pm_px *pxpt = &px_statistic_data[cpu];
+ pxpt->prev_state_wall = now;
+ }
+}
+
void px_statistic_update(cpumask_t cpumask, uint8_t from, uint8_t to)
{
uint32_t i;
return ret;
}
+
+
+/*********************************************************************
+ * CPUFREQ SUSPEND/RESUME *
+ *********************************************************************/
+
+void cpufreq_suspend(void)
+{
+ int cpu;
+
+ /* to protect the case when Px was controlled by dom0-kernel */
+ /* or when CPU_FREQ not set in which case ACPI Px objects not parsed */
+ for_each_online_cpu(cpu) {
+ struct processor_performance *perf = &processor_pminfo[cpu].perf;
+
+ if (!perf->init)
+ return;
+ }
+
+ cpufreq_dom_dbs(CPUFREQ_GOV_STOP);
+
+ cpufreq_dom_exit();
+
+ px_statistic_suspend();
+}
+
/*
 * Re-establish cpufreq state after host resume: reset per-CPU P-state
 * tracking, restart Px statistics, rebuild the PSD-domain maps, and
 * restart the dbs governor per domain.
 * Returns 0 on success or a negative error from re-initialisation.
 */
int cpufreq_resume(void)
{
    int cpu, ret = 0;

    /* 1. to protect the case when Px was controlled by dom0-kernel */
    /* or when CPU_FREQ not set in which case ACPI Px objects not parsed */
    /* 2. set state and resume flag to sync cpu to right state and freq */
    for_each_online_cpu(cpu) {
        struct processor_performance *perf = &processor_pminfo[cpu].perf;
        struct cpufreq_policy *policy = &xen_px_policy[cpu];

        if (!perf->init)
            /* NOTE(review): ret is still 0 here, so this path returns
             * success after tearing down domain state — confirm intended. */
            goto err;
        /* Clear the cached P-state so the next ->target() sees a change. */
        perf->state = 0;
        /* Force the next ->target() to actually program the hardware. */
        policy->resume = 1;
    }

    px_statistic_resume();

    ret = cpufreq_dom_init();
    if (ret)
        goto err;

    ret = cpufreq_dom_dbs(CPUFREQ_GOV_START);
    if (ret)
        goto err;

    return ret;

err:
    /* NOTE(review): cpufreq_dom_exit() may already have run in
     * cpufreq_suspend(); it must tolerate being called twice. */
    cpufreq_dom_exit();
    return ret;
}