c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_max_cores = 1;
+ c->x86_num_siblings = 1;
c->x86_clflush_size = 0;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
- smp_num_siblings = (ebx & 0xff0000) >> 16;
+ c->x86_num_siblings = (ebx & 0xff0000) >> 16;
- if (smp_num_siblings == 1) {
+ if (c->x86_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (smp_num_siblings > 1 ) {
+ } else if (c->x86_num_siblings > 1 ) {
- if (smp_num_siblings > NR_CPUS) {
- printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
- smp_num_siblings = 1;
+ if (c->x86_num_siblings > NR_CPUS) {
+ printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", c->x86_num_siblings);
+ c->x86_num_siblings = 1;
return;
}
- index_msb = get_count_order(smp_num_siblings);
+ index_msb = get_count_order(c->x86_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
- smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+ c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
- index_msb = get_count_order(smp_num_siblings) ;
+ index_msb = get_count_order(c->x86_num_siblings) ;
core_bits = get_count_order(c->x86_max_cores);
mcg.mc_socketid = phys_proc_id[cpu];
mcg.mc_coreid = cpu_core_id[cpu];
mcg.mc_apicid = cpu_physical_id(cpu);
- mcg.mc_core_threadid = mcg.mc_apicid & ( 1 << (smp_num_siblings - 1));
+ mcg.mc_core_threadid =
+ mcg.mc_apicid & ( 1 << (cpu_data[cpu].x86_num_siblings - 1));
rdmsrl(MSR_IA32_MCG_STATUS, mcg.mc_gstatus);
for ( i = 0; i < nr_mce_banks; i++ ) {
nmi_perfctr_msr = MSR_P4_IQ_PERFCTR0;
nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
- if ( smp_num_siblings == 2 )
+ if ( boot_cpu_data.x86_num_siblings == 2 )
nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
if (!(misc_enable & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
model = &op_p4_spec;
return 1;
#else
- switch (smp_num_siblings) {
+ switch (current_cpu_data.x86_num_siblings) {
case 1:
*cpu_type = "i386/p4";
model = &op_p4_spec;
static inline void setup_num_counters(void)
{
#ifdef CONFIG_SMP
- if (smp_num_siblings == 2)
+ if (boot_cpu_data.x86_num_siblings == 2) /* XXX */
num_counters = NUM_COUNTERS_HT2;
#endif
}
static int inline addr_increment(void)
{
#ifdef CONFIG_SMP
- return smp_num_siblings == 2 ? 2 : 1;
+ return boot_cpu_data.x86_num_siblings == 2 ? 2 : 1;
#else
return 1;
#endif
/* Set if we find a B stepping CPU */
static int __devinitdata smp_b_stepping;
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-#endif
-
/* Package ID of each logical CPU */
int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
cpu_set(cpu, cpu_sibling_setup_map);
- if (smp_num_siblings > 1) {
+ if (c[cpu].x86_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] &&
cpu_core_id[cpu] == cpu_core_id[i]) {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
- if (current_cpu_data.x86_max_cores == 1) {
+ if (c[cpu].x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
int x86_power;
__u32 x86_max_cores; /* cpuid returned max cores value */
__u32 booted_cores; /* number of cores as seen by OS */
+ __u32 x86_num_siblings; /* cpuid logical cpus per chip value */
__u32 apicid;
unsigned short x86_clflush_size;
} __cacheline_aligned;
extern void smp_alloc_memory(void);
extern int pic_mode;
-extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];