rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
rdmsrl(MSR_CSTAR, saved_cstar);
rdmsrl(MSR_LSTAR, saved_lstar);
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
{
rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
wrmsrl(MSR_GS_BASE, saved_gs_base);
wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
{
/* Recover sysenter MSRs */
wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
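The hunks above only save and restore the SYSENTER MSRs on vendors that implement SYSENTER from long mode: Intel, and now VIA/Centaur. AMD CPUs enter the kernel via SYSCALL instead, so touching these MSRs there would be wasted work. A minimal sketch of the predicate the repeated vendor test encodes (the helper name vendor_uses_sysenter is hypothetical, not part of the patch, and it assumes Xen's boot_cpu_data):

    /* Hypothetical helper: Intel and Centaur (VIA) honour SYSENTER in
     * long mode, AMD does not. */
    static bool vendor_uses_sysenter(void)
    {
        return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
               boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
    }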
subdir-y += mtrr
obj-y += amd.o
+obj-y += centaur.o
obj-y += common.o
obj-y += intel.o
obj-y += intel_cacheinfo.o
obj-y += mwait-idle.o
-
-# Keeping around for VIA support (JBeulich)
-# obj-$(x86_32) += centaur.o
c->x86_capability[5] = cpuid_edx(0xC0000001);
}
- /* Cyrix III family needs CX8 & PGE explicity enabled. */
- if (c->x86_model >=6 && c->x86_model <= 9) {
- rdmsrl(MSR_VIA_FCR, msr_content);
- wrmsrl(MSR_VIA_FCR, msr_content | (1ULL << 1 | 1ULL << 7));
- set_bit(X86_FEATURE_CX8, c->x86_capability);
+ if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+ c->x86_cache_alignment = c->x86_clflush_size * 2;
+ set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
}
- /* Before Nehemiah, the C3's had 3dNOW! */
- if (c->x86_model >=6 && c->x86_model <9)
- set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-
get_model_name(c);
display_cacheinfo(c);
}
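Leaf 0xC0000001 is the Centaur extended-feature leaf; its EDX holds the PadLock flags that init_c3() stores in x86_capability[5], and family 6 model 0xf and later (the Nano line) additionally get CONSTANT_TSC. A standalone sketch of probing that leaf with GCC's <cpuid.h>; querying 0xC0000000 first guards against CPUs that do not implement the Centaur leaf range:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0xC0000000 reports the highest implemented Centaur leaf. */
        __cpuid(0xC0000000, eax, ebx, ecx, edx);
        if ( eax < 0xC0000001 )
            return 1; /* no Centaur extended leaves on this CPU */

        /* This EDX is what the hunk above captures in x86_capability[5]. */
        __cpuid(0xC0000001, eax, ebx, ecx, edx);
        printf("Centaur feature flags (edx): %#010x\n", edx);
        return 0;
    }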
static void __init init_centaur(struct cpuinfo_x86 *c)
{
- /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
- 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
- clear_bit(0*32+31, c->x86_capability);
-
if (c->x86 == 6)
init_c3(c);
}
-static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
- /* VIA C3 CPUs (670-68F) need further shifting. */
- if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
- size >>= 8;
-
- /* VIA also screwed up Nehemiah stepping 1, and made
- it return '65KB' instead of '64KB'
- - Note, it seems this may only be in engineering samples. */
- if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
- size -=1;
-
- return size;
-}
-
static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_init = init_centaur,
- .c_size_cache = centaur_size_cache,
};
int __init centaur_init_cpu(void)
{
    cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
return 0;
}
-
-//early_arch_initcall(centaur_init_cpu);
{
intel_cpu_init();
amd_init_cpu();
+ centaur_init_cpu();
early_cpu_detect();
}
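Registering centaur_cpu_dev in cpu_devs[] lets the common identification path dispatch to init_centaur() when the boot CPU's vendor string matches c_ident. That string comes straight from CPUID leaf 0; a standalone sketch of reading it (note the EBX, EDX, ECX register order):

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        __cpuid(0, eax, ebx, ecx, edx);
        memcpy(vendor + 0, &ebx, 4); /* vendor string order: EBX... */
        memcpy(vendor + 4, &edx, 4); /* ...then EDX... */
        memcpy(vendor + 8, &ecx, 4); /* ...then ECX */
        vendor[12] = '\0';

        /* Prints "CentaurHauls" on VIA parts, matching c_ident above. */
        printf("%s\n", vendor);
        return 0;
    }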
/*
{
struct hvm_function_table *fns = NULL;
- switch ( boot_cpu_data.x86_vendor )
- {
- case X86_VENDOR_INTEL:
+ if ( cpu_has_vmx )
fns = start_vmx();
- break;
- case X86_VENDOR_AMD:
+ else if ( cpu_has_svm )
fns = start_svm();
- break;
- default:
- break;
- }
if ( fns == NULL )
return 0;
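Keying the dispatch on feature bits instead of vendor identity is what lets VIA's VT-x-capable processors take the VMX path without a third switch case; it also subsumes the VMXE/SVM feature tests deleted from start_vmx()/start_svm() below. As best I recall from Xen's include/asm-x86/cpufeature.h, the two predicates expand roughly as follows (sketch; exact spelling may differ):

    /* Sketch: VIA parts implementing VT-x set X86_FEATURE_VMXE just as
     * Intel parts do, so cpu_has_vmx covers both vendors. */
    #define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMXE)
    #define cpu_has_svm boot_cpu_has(X86_FEATURE_SVM)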
nestedhvm_setup(void)
{
/* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
- unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
+ unsigned int nr = cpu_has_vmx ? 2 : 3;
unsigned int i, order = get_order_from_pages(nr);
if ( !hvm_funcs.name )
{
bool_t printed = 0;
- if ( !test_bit(X86_FEATURE_SVM, &boot_cpu_data.x86_capability) )
- return NULL;
-
svm_host_osvw_reset();
if ( svm_cpu_up() )
*(u32 *)(p + 1) = 0x80000000;
*(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */
*(u8 *)(p + 6) = 0x01;
- *(u8 *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- ? 0xc1 : 0xd9);
+ *(u8 *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9);
*(u8 *)(p + 8) = 0xc3; /* ret */
memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
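The byte written at p + 7 picks the VM-exit instruction for this (Viridian) hypercall page: 0f 01 c1 encodes VMCALL (VT-x, hence Intel and VIA) while 0f 01 d9 encodes VMMCALL (AMD SVM), and bit 31 of the immediate marks the call as a Xen rather than a native Viridian hypercall. A standalone sketch that assembles the same 9-byte stub and dumps it (build_stub is a hypothetical name for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Emit: mov $0x80000000, %eax ; vmcall|vmmcall ; ret */
    static size_t build_stub(uint8_t *p, int has_vmx)
    {
        uint32_t imm = 0x80000000; /* bit 31: Xen, not Viridian, call */

        p[0] = 0xb8;                  /* mov $imm32, %eax */
        memcpy(p + 1, &imm, 4);
        p[5] = 0x0f;                  /* two-byte opcode escape */
        p[6] = 0x01;
        p[7] = has_vmx ? 0xc1 : 0xd9; /* VMCALL : VMMCALL */
        p[8] = 0xc3;                  /* ret */
        return 9;
    }

    int main(void)
    {
        uint8_t buf[9];
        size_t i, n = build_stub(buf, 1);

        for ( i = 0; i < n; i++ )
            printf("%02x ", buf[i]); /* b8 00 00 00 80 0f 01 c1 c3 */
        printf("\n");
        return 0;
    }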
struct hvm_function_table * __init start_vmx(void)
{
- if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
- return NULL;
-
set_in_cr4(X86_CR4_VMXE);
if ( vmx_cpu_up() )
break;
/* Currently only EPT is supported */
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ if ( !cpu_has_vmx )
break;
rc = mem_event_enable(d, mec, med, _VPF_mem_access,
p2m->cr3 = CR3_EADDR;
- if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
+ if ( hap_enabled(d) && cpu_has_vmx )
ept_p2m_init(p2m);
else
p2m_pt_init(p2m);
wrmsrl(MSR_LSTAR, (unsigned long)stack);
stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR )
{
/* SYSENTER entry. */
wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);