return 0;
}
-#define num_physpages 0
-
-/*
- * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
- * misexecution of code under Linux. Owners of such processors should
- * contact AMD for precise details and a CPU swap.
- *
- * See http://www.multimania.com/poulot/k6bug.html
- * http://www.amd.com/K6/k6docs/revgd.html
- *
- * The following test is erm.. interesting. AMD neglected to up
- * the chip setting when fixing the bug but they also tweaked some
- * performance at the same time..
- */
-
-extern void vide(void);
-__asm__(".text\n.align 4\nvide: ret");
-
/* Can this system suffer from TSC drift due to C1 clock ramping? */
static int c1_ramping_may_cause_clock_drift(struct cpuinfo_x86 *c)
{
static void __devinit init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
- int mbytes = num_physpages >> (20-PAGE_SHIFT);
int r;
#ifdef CONFIG_SMP
switch(c->x86)
{
- case 4:
- /*
- * General Systems BIOSen alias the cpu frequency registers
- * of the Elan at 0x000df000. Unfortuantly, one of the Linux
- * drivers subsequently pokes it, and changes the CPU speed.
- * Workaround : Remove the unneeded alias.
- */
-#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
-#define CBAR_ENB (0x80000000)
-#define CBAR_KEY (0X000000CB)
- if (c->x86_model==9 || c->x86_model == 10) {
- if (inl (CBAR) & CBAR_ENB)
- outl (0 | CBAR_KEY, CBAR);
- }
- break;
- case 5:
- if( c->x86_model < 6 )
- {
- /* Based on AMD doc 20734R - June 2000 */
- if ( c->x86_model == 0 ) {
- clear_bit(X86_FEATURE_APIC, c->x86_capability);
- set_bit(X86_FEATURE_PGE, c->x86_capability);
- }
- break;
- }
-
- if ( c->x86_model == 6 && c->x86_mask == 1 ) {
- const int K6_BUG_LOOP = 1000000;
- int n;
- void (*f_vide)(void);
- unsigned long d, d2;
-
- printk(KERN_INFO "AMD K6 stepping B detected - ");
-
- /*
- * It looks like AMD fixed the 2.6.2 bug and improved indirect
- * calls at the same time.
- */
-
- n = K6_BUG_LOOP;
- f_vide = vide;
- rdtscl(d);
- while (n--)
- f_vide();
- rdtscl(d2);
- d = d2-d;
-
- if (d > 20*K6_BUG_LOOP)
- printk("system stability may be impaired when more than 32 MB are used.\n");
- else
- printk("probably OK (after B9730xxxx).\n");
- printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
- }
-
- /* K6 with old style WHCR */
- if (c->x86_model < 8 ||
- (c->x86_model== 8 && c->x86_mask < 8)) {
- /* We can only write allocate on the low 508Mb */
- if(mbytes>508)
- mbytes=508;
-
- rdmsr(MSR_K6_WHCR, l, h);
- if ((l&0x0000FFFF)==0) {
- unsigned long flags;
- l=(1<<0)|((mbytes/4)<<1);
- local_irq_save(flags);
- wbinvd();
- wrmsr(MSR_K6_WHCR, l, h);
- local_irq_restore(flags);
- printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
- mbytes);
- }
- break;
- }
-
- if ((c->x86_model == 8 && c->x86_mask >7) ||
- c->x86_model == 9 || c->x86_model == 13) {
- /* The more serious chips .. */
-
- if(mbytes>4092)
- mbytes=4092;
-
- rdmsr(MSR_K6_WHCR, l, h);
- if ((l&0xFFFF0000)==0) {
- unsigned long flags;
- l=((mbytes>>2)<<22)|(1<<16);
- local_irq_save(flags);
- wbinvd();
- wrmsr(MSR_K6_WHCR, l, h);
- local_irq_restore(flags);
- printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
- mbytes);
- }
-
- /* Set MTRR capability flag if appropriate */
- if (c->x86_model == 13 || c->x86_model == 9 ||
- (c->x86_model == 8 && c->x86_mask >= 8))
- set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
- break;
- }
-
- if (c->x86_model == 10) {
- /* AMD Geode LX is model 10 */
- /* placeholder for any needed mods */
- break;
- }
- break;
- case 6: /* An Athlon/Duron */
+ case 6: /* An Athlon/Duron */
- /* Bit 15 of Athlon specific MSR 15, needs to be 0
- * to enable SSE on Palomino/Morgan/Barton CPU's.
- * If the BIOS didn't enable it already, enable it here.
- */
- if (c->x86_model >= 6 && c->x86_model <= 10) {
- if (!cpu_has(c, X86_FEATURE_XMM)) {
- printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
- rdmsr(MSR_K7_HWCR, l, h);
- l &= ~0x00008000;
- wrmsr(MSR_K7_HWCR, l, h);
- set_bit(X86_FEATURE_XMM, c->x86_capability);
- }
+ /* Bit 15 of the Athlon-specific MSR 15 needs to be 0
+ * to enable SSE on Palomino/Morgan/Barton CPUs.
+ * If the BIOS didn't enable it already, enable it here.
+ */
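+ /*
+ * (Illustrative note: bit 15 of MSR_K7_HWCR is commonly documented
+ * as SSEDIS; clearing it re-enables SSE execution. The XMM CPUID
+ * bit is then set by hand below, since x86_capability was sampled
+ * before this MSR write.)
+ */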
+ if (c->x86_model >= 6 && c->x86_model <= 10) {
+ if (!cpu_has(c, X86_FEATURE_XMM)) {
+ printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+ rdmsr(MSR_K7_HWCR, l, h);
+ l &= ~0x00008000;
+ wrmsr(MSR_K7_HWCR, l, h);
+ set_bit(X86_FEATURE_XMM, c->x86_capability);
}
+ }
- /* It's been determined by AMD that Athlons since model 8 stepping 1
- * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
- * As per AMD technical note 27212 0.2
- */
- if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
- rdmsr(MSR_K7_CLK_CTL, l, h);
- if ((l & 0xfff00000) != 0x20000000) {
- printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
- ((l & 0x000fffff)|0x20000000));
- wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
- }
+ /* It's been determined by AMD that Athlons since model 8 stepping 1
+ * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
+ * as per AMD technical note 27212 0.2.
+ */
+ if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+ rdmsr(MSR_K7_CLK_CTL, l, h);
+ if ((l & 0xfff00000) != 0x20000000) {
+ printk("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
+ ((l & 0x000fffff)|0x20000000));
+ wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
}
- break;
- }
+ }
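+ /*
+ * i.e. the low 20 bits of CLK_CTL are preserved and the top 12
+ * bits are forced to 0x200: a value like 0x60031223 is rewritten
+ * as 0x20031223.
+ */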
+ set_bit(X86_FEATURE_K7, c->x86_capability);
+ break;
- switch (c->x86) {
case 0xf:
/* Use K8 tuning for Fam10h and Fam11h */
case 0x10 ... 0x17:
if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
pv_post_outb_hook = check_disable_c1e;
break;
- case 6:
- set_bit(X86_FEATURE_K7, c->x86_capability);
- break;
}
display_cacheinfo(c);
if (c->x86 >= 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
- /* K6s reports MCEs but don't actually have all the MSRs */
- if (c->x86 < 6)
- clear_bit(X86_FEATURE_MCE, c->x86_capability);
-
#ifdef __x86_64__
/* AMD CPUs do not support SYSENTER outside of legacy mode. */
clear_bit(X86_FEATURE_SEP, c->x86_capability);
set_cpuidmask(c);
}
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
- /* AMD errata T13 (order #21922) */
- if ((c->x86 == 6)) {
- if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
- size = 64;
- if (c->x86_model == 4 &&
- (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
- size = 256;
- }
- return size;
-}
-
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
- .c_models = {
- { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
- {
- [3] = "486 DX/2",
- [7] = "486 DX/2-WB",
- [8] = "486 DX/4",
- [9] = "486 DX/4-WB",
- [14] = "Am5x86-WT",
- [15] = "Am5x86-WB"
- }
- },
- },
.c_init = init_amd,
.c_identify = generic_identify,
- .c_size_cache = amd_size_cache,
};
int __init amd_init_cpu(void)
{
/* Not much we can do here... */
/* Check if at least it has cpuid */
- if (c->cpuid_level == -1) {
- /* No cpuid. It must be an ancient CPU */
- if (c->x86 == 4)
- safe_strcpy(c->x86_model_id, "486");
- else if (c->x86 == 3)
- safe_strcpy(c->x86_model_id, "386");
- }
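+ /*
+ * Every CPU this code still supports implements CPUID, so a
+ * cpuid_level of -1 here means the identification path is broken,
+ * not that we are running on a 386/486-class part.
+ */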
+ BUG_ON(c->cpuid_level == -1);
}
static struct cpu_dev default_cpu = {
}
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
- return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
The others are not touched to avoid unwanted side effects.
static void __init early_cpu_detect(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 cap4, tfms, cap0, misc;
c->x86_cache_alignment = 32;
- if (!have_cpuid_p())
- return;
-
/* Get vendor name */
cpuid(0x00000000, &c->cpuid_level,
(int *)&c->x86_vendor_id[0],
get_cpu_vendor(c, 1);
- c->x86 = 4;
- if (c->cpuid_level >= 0x00000001) {
- u32 cap4, tfms, cap0, misc;
- cpuid(0x00000001, &tfms, &misc, &cap4, &cap0);
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf)
- c->x86 += (tfms >> 20) & 0xff;
- if (c->x86 >= 0x6)
- c->x86_model += ((tfms >> 16) & 0xF) << 4;
- c->x86_mask = tfms & 15;
- cap0 &= ~cleared_caps[0];
- cap4 &= ~cleared_caps[4];
- if (cap0 & (1<<19))
- c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
- /* Leaf 0x1 capabilities filled in early for Xen. */
- c->x86_capability[0] = cap0;
- c->x86_capability[4] = cap4;
- }
+ cpuid(0x00000001, &tfms, &misc, &cap4, &cap0);
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf)
+ c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ c->x86_mask = tfms & 15;
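+ /*
+ * Worked example: EAX = 0x000006fb from leaf 1 decodes to family
+ * 0x6, model 0xf (base model 0xf, extended model 0x0), stepping
+ * 0xb; only family 0xf parts add in the extended family field
+ * from bits 27:20.
+ */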
+ cap0 &= ~cleared_caps[0];
+ cap4 &= ~cleared_caps[4];
+ if (cap0 & (1<<19))
+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+ /* Leaf 0x1 capabilities filled in early for Xen. */
+ c->x86_capability[0] = cap0;
+ c->x86_capability[4] = cap4;
}
void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
- u32 tfms, xlvl;
-
- if (have_cpuid_p()) {
- /* Get vendor name */
- cpuid(0x00000000, &c->cpuid_level,
- (int *)&c->x86_vendor_id[0],
- (int *)&c->x86_vendor_id[8],
- (int *)&c->x86_vendor_id[4]);
+ u32 tfms, xlvl, capability, excap, ebx;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
- get_cpu_vendor(c, 0);
- /* Initialize the standard set of capabilities */
- /* Note that the vendor-specific code below might override */
+ get_cpu_vendor(c, 0);
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
- /* Intel-defined flags: level 0x00000001 */
- if ( c->cpuid_level >= 0x00000001 ) {
- u32 capability, excap, ebx;
- cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
- c->x86_capability[0] = capability;
- c->x86_capability[4] = excap;
- c->x86 = (tfms >> 8) & 15;
- c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf)
- c->x86 += (tfms >> 20) & 0xff;
- if (c->x86 >= 0x6)
- c->x86_model += ((tfms >> 16) & 0xF) << 4;
- c->x86_mask = tfms & 15;
- if ( cpu_has(c, X86_FEATURE_CLFLSH) )
- c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
- } else {
- /* Have CPUID level 0 only - unheard of */
- c->x86 = 4;
- }
-
- /* AMD-defined flags: level 0x80000001 */
- xlvl = cpuid_eax(0x80000000);
- if ( (xlvl & 0xffff0000) == 0x80000000 ) {
- if ( xlvl >= 0x80000001 ) {
- c->x86_capability[1] = cpuid_edx(0x80000001);
- c->x86_capability[6] = cpuid_ecx(0x80000001);
- }
- if ( xlvl >= 0x80000004 )
- get_model_name(c); /* Default name */
+ /* Intel-defined flags: level 0x00000001 */
+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf)
+ c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ c->x86_mask = tfms & 15;
+ if ( cpu_has(c, X86_FEATURE_CLFLSH) )
+ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
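+ /*
+ * EBX bits 15:8 report the CLFLUSH line size in 8-byte units, so
+ * the common value of 8 there yields 64-byte lines.
+ */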
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+ if ( xlvl >= 0x80000001 ) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
}
-
- /* Intel-defined flags: level 0x00000007 */
- if ( c->cpuid_level >= 0x00000007 )
- c->x86_capability[X86_FEATURE_FSGSBASE / 32]
- = cpuid_ebx(0x00000007);
+ if ( xlvl >= 0x80000004 )
+ get_model_name(c); /* Default name */
}
+ /* Intel-defined flags: level 0x00000007 */
+ if ( c->cpuid_level >= 0x00000007 )
+ c->x86_capability[X86_FEATURE_FSGSBASE / 32]
+ = cpuid_ebx(0x00000007);
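+ /*
+ * FSGSBASE is bit 0 of the leaf-7 EBX word, so dividing the
+ * feature number by 32 selects the x86_capability[] word that
+ * caches the whole register.
+ */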
+
early_intel_workaround(c);
#ifdef CONFIG_X86_HT
c->x86_clflush_size = 0;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
- if (!have_cpuid_p()) {
- /* First of all, decide if this is a 486 or higher */
- /* It's a 486 if we can modify the AC flag */
- if ( flag_is_changeable_p(X86_EFLAGS_AC) )
- c->x86 = 4;
- else
- c->x86 = 3;
- }
-
generic_identify(c);
#ifdef NOISY_CAPS
if (c->x86_vendor < X86_VENDOR_NUM)
vendor = this_cpu->c_vendor;
- else if (c->cpuid_level >= 0)
+ else
vendor = c->x86_vendor_id;
if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
else
printk("%s", c->x86_model_id);
- if (c->x86_mask || c->cpuid_level >= 0)
- printk(" stepping %02x\n", c->x86_mask);
- else
- printk("\n");
+ printk(" stepping %02x\n", c->x86_mask);
}
static cpumask_t cpu_initialized;
*/
void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
- unsigned char ccr2, ccr3;
+ unsigned char ccr3;
unsigned long flags;
/* we test for DEVID by checking whether CCR3 is writable */
setCx86(CX86_CCR3, ccr3 ^ 0x80);
getCx86(0xc0); /* dummy to change bus */
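+ /*
+ * getCx86()/setCx86() go through the Cyrix configuration-register
+ * index/data port pair (0x22/0x23); the dummy access above
+ * presumably forces a bus cycle between the CCR3 write and the
+ * read-back below.
+ */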
- if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */
- ccr2 = getCx86(CX86_CCR2);
- setCx86(CX86_CCR2, ccr2 ^ 0x04);
- getCx86(0xc0); /* dummy */
-
- if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
- *dir0 = 0xfd;
- else { /* Cx486S A step */
- setCx86(CX86_CCR2, ccr2);
- *dir0 = 0xfe;
- }
- }
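+ /*
+ * CCR3 bit 7 was toggled above; reading back the unmodified value
+ * means CCR3 is not writable, i.e. a pre-DEVID Cyrix 486 that is
+ * no longer supported here.
+ */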
+ if (getCx86(CX86_CCR3) == ccr3) /* no DEVID regs. */
+ BUG();
else {
setCx86(CX86_CCR3, ccr3); /* restore CCR3 */
"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
"M II ", "Unknown"
};
-static char Cx486_name[][5] __initdata = {
- "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
- "SRx2", "DRx2"
-};
-static char Cx486S_name[][4] __initdata = {
- "S", "S2", "Se", "S2e"
-};
-static char Cx486D_name[][4] __initdata = {
- "DX", "DX2", "?", "?", "?", "DX4"
-};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
switch (dir0_msn) {
unsigned char tmp;
- case 0: /* Cx486SLC/DLC/SRx/DRx */
- p = Cx486_name[dir0_lsn & 7];
- break;
-
- case 1: /* Cx486S/DX/DX2/DX4 */
- p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
- : Cx486S_name[dir0_lsn & 3];
- break;
-
- case 2: /* 5x86 */
- Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
- p = Cx86_cb+2;
- break;
-
case 3: /* 6x86/6x86L */
Cx86_cb[1] = ' ';
Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
break;
- case 0xf: /* Cyrix 486 without DEVID registers */
- switch (dir0_lsn) {
- case 0xd: /* either a 486SLC or DLC w/o DEVID */
- dir0_msn = 0;
- p = Cx486_name[/*(c->hard_math) ? 1 : 0*/1];
- break;
-
- case 0xe: /* a 486S A step */
- dir0_msn = 0;
- p = Cx486S_name[0];
- break;
- }
- break;
-
default: /* unknown (shouldn't happen, we know everyone ;-) */
dir0_msn = 7;
break;
return (unsigned char) (test >> 8) == 0x02;
}
-static void cyrix_identify(struct cpuinfo_x86 * c)
-{
- /* Detect Cyrix with disabled CPUID */
- if ( c->x86 == 4 && test_cyrix_52div() ) {
- unsigned char dir0, dir1;
-
- safe_strcpy(c->x86_vendor_id, "CyrixInstead");
- c->x86_vendor = X86_VENDOR_CYRIX;
-
- /* Actually enable cpuid on the older cyrix */
-
- /* Retrieve CPU revisions */
-
- do_cyrix_devid(&dir0, &dir1);
-
- dir0>>=4;
-
- /* Check it is an affected model */
-
- if (dir0 == 5 || dir0 == 3)
- {
- unsigned char ccr3, ccr4;
- unsigned long flags;
- printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
- local_irq_save(flags);
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ccr4 = getCx86(CX86_CCR4);
- setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
- local_irq_restore(flags);
- }
- }
- generic_identify(c);
-}
-
static struct cpu_dev cyrix_cpu_dev __initdata = {
.c_vendor = "Cyrix",
.c_ident = { "CyrixInstead" },
.c_init = init_cyrix,
- .c_identify = cyrix_identify,
+ .c_identify = generic_identify,
};
int __init cyrix_init_cpu(void)
#define select_idle_routine(x) ((void)0)
-extern int trap_init_f00f_bug(void);
-
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
* Alignment at which movsl is preferred for bulk memory copies.
unsigned int l2 = 0;
char *p = NULL;
-#ifdef CONFIG_X86_F00F_BUG
- /*
- * All current models of Pentium and Pentium with MMX technology CPUs
- * have the F0 0F bug, which lets nonprivileged users lock up the system.
- * Note that the workaround only should be initialized once...
- */
- c->f00f_bug = 0;
- if ( c->x86 == 5 ) {
- static int f00f_workaround_enabled = 0;
-
- c->f00f_bug = 1;
- if ( !f00f_workaround_enabled ) {
- trap_init_f00f_bug();
- printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
- f00f_workaround_enabled = 1;
- }
- }
-#endif
-
/* Detect the extended topology information if available */
detect_extended_topology(c);
* Set up the preferred alignment for movsl bulk memory moves
*/
switch (c->x86) {
- case 4: /* 486: untested */
- break;
- case 5: /* Old Pentia: untested */
- break;
case 6: /* PII/PIII only like movsl with 8-byte alignment */
movsl_mask.mask = 7;
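+		/*
+		 * The mask is tested against the low address bits of the
+		 * buffers, so a mask of 7 effectively gates the movsl fast
+		 * path on 8-byte alignment.
+		 */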
break;
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_models = {
- { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
- {
- [0] = "486 DX-25/33",
- [1] = "486 DX-50",
- [2] = "486 SX",
- [3] = "486 DX/2",
- [4] = "486 SL",
- [5] = "486 SX/2",
- [7] = "486 DX/2-WB",
- [8] = "486 DX/4",
- [9] = "486 DX/4-WB"
- }
- },
- { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
- {
- [0] = "Pentium 60/66 A-step",
- [1] = "Pentium 60/66",
- [2] = "Pentium 75 - 200",
- [3] = "OverDrive PODP5V83",
- [4] = "Pentium MMX",
- [7] = "Mobile Pentium 75 - 200",
- [8] = "Mobile Pentium MMX"
- }
- },
{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
{
[0] = "Pentium Pro A-step",
break;
case X86_VENDOR_INTEL:
- /*
- * The P5 family is different. P4/P6 and latest CPUs share the
- * same polling methods.
- */
- if ( c->x86 != 5 )
- {
- init_timer(&mce_timer, mce_work_fn, NULL, 0);
- set_timer(&mce_timer, NOW() + MCE_PERIOD);
- }
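+ /*
+ * With P5-style machine checks gone, all supported Intel CPUs
+ * share the P4/P6 MCA polling method, so the timer is started
+ * unconditionally.
+ */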
+ init_timer(&mce_timer, mce_work_fn, NULL, 0);
+ set_timer(&mce_timer, NOW() + MCE_PERIOD);
break;
}
#define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
-/* Set if we find a B stepping CPU */
-static int smp_b_stepping;
-
/* Package ID of each logical CPU */
int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
if ( id != 0 )
identify_cpu(c);
- /* Mask B, Pentium, but not Pentium MMX -- remember it, as it has bugs. */
- if ( (c->x86_vendor == X86_VENDOR_INTEL) &&
- (c->x86 == 5) &&
- ((c->x86_mask >= 1) && (c->x86_mask <= 4)) &&
- (c->x86_model <= 3) )
- smp_b_stepping = 1;
-
/*
* Certain Athlons might work (for various values of 'work') in SMP
* but they are not certified as MP capable.
void __init smp_cpus_done(unsigned int max_cpus)
{
- if ( smp_b_stepping )
- printk(KERN_WARNING "WARNING: SMP operation may be "
- "unreliable with B stepping processors.\n");
-
/*
* Don't taint if we are running SMP kernel on a single non-MP
* approved Athlon