case CPU_DYING:
hvm_cpu_down();
break;
+ case CPU_UP_CANCELED:
case CPU_DEAD:
hvm_funcs.cpu_dead(cpu);
break;
struct vcpu *v;
printk("CPU\tNMI\n");
- for_each_possible_cpu ( i )
+ for_each_online_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
size_t counters_size = sizeof(struct op_msr) * model->num_counters;
int i;
- for (i = 0; i < NR_CPUS; ++i) {
- if (!test_bit(i, &cpu_online_map))
- continue;
-
+ for_each_online_cpu (i) {
cpu_msrs[i].counters = xmalloc_bytes(counters_size);
if (!cpu_msrs[i].counters) {
success = 0;
init_trace_bufs();
- init_tmem();
-
console_endboot();
/* Hide UART from DOM0 if we're using it */
* construct cpu_sibling_map, so that we can tell sibling CPUs
* efficiently.
*/
- for_each_possible_cpu(cpu) {
- cpus_clear(per_cpu(cpu_sibling_map, cpu));
- cpus_clear(per_cpu(cpu_core_map, cpu));
- }
-
cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0));
unsigned long flags;
s_time_t now;
+ /* If we have constant-rate TSCs then scale factor can be shared. */
+ if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+ this_cpu(cpu_time).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+
local_irq_save(flags);
rdtscll(t->local_tsc_stamp);
now = read_platform_stime();
/* If we have constant-rate TSCs then scale factor can be shared. */
if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
{
- int cpu;
- for_each_possible_cpu ( cpu )
- per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
/* If TSCs are not marked as 'reliable', re-sync during rendezvous. */
if ( !boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
time_calibration_rendezvous_fn = time_calibration_tsc_rendezvous;
cpumask_t cpu_exclude_map;
/* Do an initial CPU placement. Pick the least-populated CPU. */
- nr_cpus = last_cpu(cpu_possible_map) + 1;
+ nr_cpus = last_cpu(cpu_online_map) + 1;
cnt = xmalloc_array(unsigned int, nr_cpus);
if ( cnt )
{
rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
for_each_vcpu ( d, v )
- if ( !test_bit(_VPF_down, &v->pause_flags) )
- cnt[v->processor]++;
+ if ( !test_bit(_VPF_down, &v->pause_flags)
+ && ((cpu = v->processor) < nr_cpus) )
+ cnt[cpu]++;
rcu_read_unlock(&domlist_read_lock);
}
}
printk("\n");
}
-
- arch_perfc_printall();
}
void perfc_reset(unsigned char key)
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
- for_each_possible_cpu ( cpu )
+ for_each_online_cpu ( cpu )
per_cpu(perfcounters, cpu)[j] = 0;
case TYPE_S_SINGLE:
++j;
break;
case TYPE_ARRAY:
- for_each_possible_cpu ( cpu )
+ for_each_online_cpu ( cpu )
memset(per_cpu(perfcounters, cpu) + j, 0,
perfc_info[i].nr_elements * sizeof(perfc_t));
case TYPE_S_ARRAY:
static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
static xen_sysctl_perfc_val_t *perfc_vals;
static unsigned int perfc_nbr_vals;
-static int perfc_init = 0;
+static cpumask_t perfc_cpumap;
+
static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
{
unsigned int i, j, v;
/* We only copy the name and array-size information once. */
- if ( !perfc_init )
+ if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
{
+ unsigned int nr_cpus;
+ perfc_cpumap = cpu_online_map;
+ nr_cpus = cpus_weight(perfc_cpumap);
+
+ perfc_nbr_vals = 0;
+
for ( i = 0; i < NR_PERFCTRS; i++ )
{
safe_strcpy(perfc_d[i].name, perfc_info[i].name);
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- perfc_d[i].nr_vals = num_possible_cpus();
+ perfc_d[i].nr_vals = nr_cpus;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
}
perfc_nbr_vals += perfc_d[i].nr_vals;
}
+
+ xfree(perfc_vals);
perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
- perfc_init = 1;
}
if ( guest_handle_is_null(desc) )
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- for_each_possible_cpu ( cpu )
+ for_each_cpu_mask ( cpu, perfc_cpumap )
perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
++j;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
- for_each_possible_cpu ( cpu )
+ for_each_cpu_mask ( cpu, perfc_cpumap )
{
perfc_t *counters = per_cpu(perfcounters, cpu) + j;
unsigned int k;
*/
#include <xen/config.h>
+#include <xen/init.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/errno.h>
}
EXPORT_SYMBOL(radix_tree_destroy);
-static /*__init*/ unsigned long __maxindex(unsigned int height)
+/* The old commented-out __init annotation is made real: the function is
+ * discarded after boot. NOTE(review): confirm no post-init callers remain. */
+static unsigned long __init __maxindex(unsigned int height)
{
unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
return index;
}
-/*__init*/ void radix_tree_init(void)
+void __init radix_tree_init(void)
{
unsigned int i;
void __init rcu_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
- cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+ /* The notifier is only registered below, so it cannot replay this CPU's
+ * bring-up: invoke the CPU_UP_PREPARE step for the boot CPU by hand. */
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&cpu_nfb);
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
{
struct vcpu *p;
struct domain *d;
- unsigned int nr_cpus = last_cpu(cpu_possible_map) + 1;
+ unsigned int cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
int *sumw = xmalloc_array(int, nr_cpus);
s_time_t *sumt = xmalloc_array(s_time_t, nr_cpus);
continue;
for_each_vcpu( d, p )
{
+ if ( (cpu = p->processor) >= nr_cpus )
+ continue;
+
if ( EDOM_INFO(p)->weight )
{
- sumw[p->processor] += EDOM_INFO(p)->weight;
+ sumw[cpu] += EDOM_INFO(p)->weight;
}
else
{
/*check for overflows*/
ASSERT((WEIGHT_PERIOD < ULONG_MAX)
&& (EDOM_INFO(p)->slice_orig < ULONG_MAX));
- sumt[p->processor] +=
+ sumt[cpu] +=
(WEIGHT_PERIOD * EDOM_INFO(p)->slice_orig) /
EDOM_INFO(p)->period_orig;
}
{
for_each_vcpu ( d, p )
{
+ if ( (cpu = p->processor) >= nr_cpus )
+ continue;
if ( EDOM_INFO(p)->weight )
{
EDOM_INFO(p)->period_orig =
EDOM_INFO(p)->slice_orig =
EDOM_INFO(p)->slice =
(EDOM_INFO(p)->weight *
- (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[p->processor])) /
- sumw[p->processor];
+ (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
}
}
}
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/multicall.h>
+#include <xen/cpu.h>
#include <public/sched.h>
#include <xsm/xsm.h>
return NULL;
}
+/*
+ * CPU notifier: set up per-CPU scheduler state on CPU_UP_PREPARE and tear
+ * down the per-CPU schedule timer on CPU_DEAD (replaces the old boot-time
+ * for_each_possible_cpu loop in scheduler_init()).
+ */
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE:
+ /* Every CPU points at the single system-wide 'ops' scheduler. */
+ per_cpu(scheduler, cpu) = &ops;
+ spin_lock_init(&per_cpu(schedule_data, cpu)._lock);
+ per_cpu(schedule_data, cpu).schedule_lock
+ = &per_cpu(schedule_data, cpu)._lock;
+ init_timer(&per_cpu(schedule_data, cpu).s_timer,
+ s_timer_fn, NULL, cpu);
+ break;
+ case CPU_DEAD:
+ /* NOTE(review): CPU_UP_CANCELED is not handled; confirm a timer
+ * initialised by a failed bring-up needs no kill_timer() here. */
+ kill_timer(&per_cpu(schedule_data, cpu).s_timer);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
/* Initialise the data structures. */
void __init scheduler_init(void)
{
+ void *hcpu = (void *)(long)smp_processor_id();
int i;
open_softirq(SCHEDULE_SOFTIRQ, schedule);
ops = *schedulers[0];
}
- for_each_possible_cpu ( i )
- {
- per_cpu(scheduler, i) = &ops;
- spin_lock_init(&per_cpu(schedule_data, i)._lock);
- per_cpu(schedule_data, i).schedule_lock
- = &per_cpu(schedule_data, i)._lock;
- init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
- }
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ register_cpu_notifier(&cpu_nfb);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
if ( SCHED_OP(&ops, init, 1) )
local_irq_enable();
}
-static int __init cpu_stopmachine_init(void)
+/* CPU notifier: give each newly arriving CPU its stopmachine tasklet. */
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- unsigned int cpu;
- for_each_possible_cpu ( cpu )
+ unsigned int cpu = (unsigned long)hcpu;
+
+ /* Only bring-up needs work; the tasklet is just (re)initialised. */
+ if ( action == CPU_UP_PREPARE )
tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
stopmachine_action, cpu);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+/* Boot: seed tasklets for CPUs already online, then track later hotplug. */
+static int __init cpu_stopmachine_init(void)
+{
+ unsigned int cpu;
+ for_each_online_cpu ( cpu )
+ {
+ void *hcpu = (void *)(long)cpu;
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ }
+ register_cpu_notifier(&cpu_nfb);
return 0;
}
__initcall(cpu_stopmachine_init);
#include <xen/cpu.h>
/* Some subsystems call into us before we are initialised. We ignore them. */
-static cpumask_t tasklets_initialised;
+static bool_t tasklets_initialised;
DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
spin_lock_irqsave(&tasklet_lock, flags);
- if ( cpu_isset(cpu, tasklets_initialised) && !t->is_dead )
+ if ( tasklets_initialised && !t->is_dead )
{
t->scheduled_on = cpu;
if ( !t->is_running )
switch ( action )
{
case CPU_UP_PREPARE:
- if ( !cpu_test_and_set(cpu, tasklets_initialised) )
- INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+ INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
break;
+ case CPU_UP_CANCELED:
case CPU_DEAD:
migrate_tasklets_from_cpu(cpu);
break;
void *hcpu = (void *)(long)smp_processor_id();
cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
register_cpu_notifier(&cpu_nfb);
+ tasklets_initialised = 1;
}
/*
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
+#include <xen/cpu.h>
#include <asm/system.h>
#include <asm/desc.h>
.desc = "dump timer queues"
};
+/* Hoisted from timer_init() so the CPU notifier can reference it too. */
+static struct timer *dummy_heap;
+
+/* CPU notifier: prepare a CPU's timer lock and placeholder heap on
+ * bring-up (replaces the old for_each_possible_cpu loop in timer_init()). */
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if ( action == CPU_UP_PREPARE )
+ {
+ spin_lock_init(&per_cpu(timers, cpu).lock);
+ /* Shared empty heap; its size/limit are set once in timer_init(). */
+ per_cpu(timers, cpu).heap = &dummy_heap;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
void __init timer_init(void)
{
- static struct timer *dummy_heap;
- int i;
+ void *cpu = (void *)(long)smp_processor_id();
open_softirq(TIMER_SOFTIRQ, timer_softirq_action);
SET_HEAP_SIZE(&dummy_heap, 0);
SET_HEAP_LIMIT(&dummy_heap, 0);
- for_each_possible_cpu ( i )
- {
- spin_lock_init(&per_cpu(timers, i).lock);
- per_cpu(timers, i).heap = &dummy_heap;
- }
+ /* The notifier is only registered below, so run the CPU_UP_PREPARE step
+ * for the boot CPU directly. */
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ register_cpu_notifier(&cpu_nfb);
register_keyhandler('a', &dump_timerq_keyhandler);
}
}
/* called at hypervisor startup */
-EXPORT void init_tmem(void)
+static int __init init_tmem(void)
{
int i;
if ( !tmh_enabled() )
- return;
+ return 0;
radix_tree_init();
if ( tmh_dedup_enabled() )
}
else
printk("tmem: initialization FAILED\n");
+
+ return 0;
}
+__initcall(init_tmem);
/*
* Local variables:
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>
+#include <xen/cpu.h>
#define EXPORT /* indicates code other modules are dependent upon */
tmh_free_page(virt_to_page(page_va));
}
-static int tmh_mempool_init(void)
+static int __init tmh_mempool_init(void)
{
tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/
-EXPORT int tmh_init(void)
-{
#ifndef __i386__
- int dstmem_order, workmem_order;
- bool_t bad_alloc = 0;
- struct page_info *pi;
- unsigned char *p1, *p2;
- int cpu;
+
+/* Allocation orders for the per-CPU buffers; set once in tmh_init(). */
+static int dstmem_order, workmem_order;
+
+/*
+ * CPU notifier: allocate the per-CPU compression buffers (dstmem/workmem)
+ * on CPU_UP_PREPARE and free them on CPU_DEAD/CPU_UP_CANCELED.
+ */
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE: {
+ if ( per_cpu(dstmem, cpu) == NULL )
+ {
+ struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
+ per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
+ }
+ if ( per_cpu(workmem, cpu) == NULL )
+ {
+ struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
+ per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
+ }
+ /* NOTE(review): allocation failure leaves the pointer NULL and is
+ * not reported (NOTIFY_DONE either way); confirm all users of
+ * dstmem/workmem tolerate NULL buffers. */
+ break;
+ }
+ case CPU_DEAD:
+ case CPU_UP_CANCELED: {
+ if ( per_cpu(dstmem, cpu) != NULL )
+ {
+ struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
+ free_domheap_pages(p, dstmem_order);
+ per_cpu(dstmem, cpu) = NULL;
+ }
+ if ( per_cpu(workmem, cpu) != NULL )
+ {
+ struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
+ free_domheap_pages(p, workmem_order);
+ per_cpu(workmem, cpu) = NULL;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+EXPORT int __init tmh_init(void)
+{
+ unsigned int cpu;
if ( !tmh_mempool_init() )
return 0;
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
- for_each_possible_cpu ( cpu )
+
+ /* Buffers for CPUs already online; the notifier covers later hotplug. */
+ for_each_online_cpu ( cpu )
{
- pi = alloc_domheap_pages(0,dstmem_order,0);
- per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
- pi = alloc_domheap_pages(0,workmem_order,0);
- per_cpu(workmem, cpu) = p2 = ((pi == NULL) ? NULL : page_to_virt(pi));
- if ( (p1 == NULL) || (p2 == NULL) )
- bad_alloc++;
+ void *hcpu = (void *)(long)cpu;
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ /* NOTE(review): the old bad_alloc printk is dropped, so allocation
+ * failures are now silent here; confirm that is acceptable. */
}
- if ( bad_alloc )
- printk("tmem: can't allocate compression buffers for %d cpus\n",
- bad_alloc);
-#endif
+
+ register_cpu_notifier(&cpu_nfb);
+
return 1;
}
+
+#else
+
+/* i386 build: no per-CPU compression buffers to set up. */
+EXPORT int __init tmh_init(void)
+{
+ return 1;
+}
+
+#endif
#include <asm/vhpt.h>
#include <asm/privop_stat.h>
-static inline void arch_perfc_printall(void)
-{
-}
-
static inline void arch_perfc_reset(void)
{
reset_privop_addrs();
#ifndef __ASM_PERFC_H__
#define __ASM_PERFC_H__
-static inline void arch_perfc_printall(void)
-{
-}
-
static inline void arch_perfc_reset(void)
{
}
void subarch_init_memory(void);
void init_IRQ(void);
-void init_tmem(void);
void vesa_init(void);
void vesa_mtrr_init(void);
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
-/* cpu_possible_map declared in <xen/cpumask.h> */
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
#ifndef __XEN_TMEM_H__
#define __XEN_TMEM_H__
-extern void init_tmem(void);
extern void tmem_destroy(void *);
extern void *tmem_relinquish_pages(unsigned int, unsigned int);
extern int opt_tmem;
char *page = NULL;
int len = 0;
int length = 0;
- long long idx = 0;
int cpu;
struct avc_cache_stats *st;
length += len;
count -= len;
- for ( cpu = idx; cpu < NR_CPUS; ++cpu )
+ for_each_online_cpu ( cpu )
{
- if ( !cpu_possible(cpu) )
- continue;
- idx = cpu + 1;
st = &per_cpu(avc_cache_stats, cpu);
len = snprintf(page, PAGE_SIZE, "%u %u %u %u %u %u\n", st->lookups,