/* See comment in start_update() for cases when this routine fails */
static int collect_cpu_info(struct cpu_signature *csig)
{
- unsigned int cpu = smp_processor_id();
- struct cpuinfo_x86 *c = &cpu_data[cpu];
-
memset(csig, 0, sizeof(*csig));
- if ( (c->x86_vendor != X86_VENDOR_AMD) || (c->x86 < 0x10) )
- {
- printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
- cpu);
- return -EINVAL;
- }
-
rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
pr_debug("microcode: CPU%d collect_cpu_info: patch_id=%#x\n",
- cpu, csig->rev);
+ smp_processor_id(), csig->rev);
return 0;
}
}
#endif
-static const struct microcode_ops microcode_amd_ops = {
+const struct microcode_ops amd_ucode_ops = {
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode,
.compare_patch = compare_patch,
.match_cpu = match_cpu,
};
-
-int __init microcode_init_amd(void)
-{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- microcode_ops = &microcode_amd_ops;
- return 0;
-}
microcode_scan_module(module_map, mbi);
}
-const struct microcode_ops *microcode_ops;
+static const struct microcode_ops __read_mostly *microcode_ops;
static DEFINE_SPINLOCK(microcode_mutex);
int __init early_microcode_init(void)
{
- int rc;
-
- rc = microcode_init_intel();
- if ( rc )
- return rc;
-
- rc = microcode_init_amd();
- if ( rc )
- return rc;
+ const struct cpuinfo_x86 *c = &boot_cpu_data;
+ int rc = 0;
- if ( microcode_ops )
+ switch ( c->x86_vendor )
{
- microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
+ case X86_VENDOR_AMD:
+ if ( c->x86 >= 0x10 )
+ microcode_ops = &amd_ucode_ops;
+ break;
+
+ case X86_VENDOR_INTEL:
+ if ( c->x86 >= 6 )
+ microcode_ops = &intel_ucode_ops;
+ break;
+ }
- if ( ucode_mod.mod_end || ucode_blob.size )
- rc = early_microcode_update_cpu();
+ if ( !microcode_ops )
+ {
+ printk(XENLOG_WARNING "Microcode loading not available\n");
+ return -ENODEV;
}
+ microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
+
+ if ( ucode_mod.mod_end || ucode_blob.size )
+ rc = early_microcode_update_cpu();
+
return rc;
}
static int collect_cpu_info(struct cpu_signature *csig)
{
- unsigned int cpu_num = smp_processor_id();
- struct cpuinfo_x86 *c = &cpu_data[cpu_num];
uint64_t msr_content;
memset(csig, 0, sizeof(*csig));
- if ( (c->x86_vendor != X86_VENDOR_INTEL) || (c->x86 < 6) )
- {
- printk(KERN_ERR "microcode: CPU%d not a capable Intel "
- "processor\n", cpu_num);
- return -1;
- }
-
csig->sig = cpuid_eax(0x00000001);
- if ( (c->x86_model >= 5) || (c->x86 > 6) )
- {
- /* get processor flags from MSR 0x17 */
- rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
- csig->pf = 1 << ((msr_content >> 50) & 7);
- }
+ rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
+ csig->pf = 1 << ((msr_content >> 50) & 7);
wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
/* As documented in the SDM: Do a CPUID 1 here */
return patch;
}
-static const struct microcode_ops microcode_intel_ops = {
+const struct microcode_ops intel_ucode_ops = {
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode,
.compare_patch = compare_patch,
.match_cpu = match_cpu,
};
-
-int __init microcode_init_intel(void)
-{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- microcode_ops = &microcode_intel_ops;
- return 0;
-}
const struct microcode_patch *new, const struct microcode_patch *old);
};
-extern const struct microcode_ops *microcode_ops;
-
-int microcode_init_intel(void);
-int microcode_init_amd(void);
+extern const struct microcode_ops amd_ucode_ops, intel_ucode_ops;
#endif /* ASM_X86_MICROCODE_PRIVATE_H */