obj-y += ioport_emulate.o
obj-y += irq.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
-obj-y += microcode_amd.o
-obj-y += microcode_intel.o
-obj-y += microcode.o
obj-y += mm.o x86_64/mm.o
obj-$(CONFIG_HVM) += monitor.o
obj-y += mpparse.o
obj-y += mcheck/
+obj-y += microcode/
obj-y += mtrr/
obj-y += amd.o
--- /dev/null
+obj-y += amd.o
+obj-y += core.o
+obj-y += intel.o
--- /dev/null
+/*
+ * AMD CPU Microcode Update Driver for Linux
+ * Copyright (C) 2008 Advanced Micro Devices Inc.
+ *
+ * Author: Peter Oruba <peter.oruba@amd.com>
+ *
+ * Based on work by:
+ * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *
+ * This driver allows updating microcode on AMD
+ * family 0x10 and later processors.
+ *
+ * Licensed under the terms of the GNU General Public
+ * License version 2. See file COPYING for details.
+ */
+
+#include <xen/err.h>
+#include <xen/init.h>
+#include <xen/mm.h> /* TODO: Fix asm/tlbflush.h breakage */
+
+#include <asm/hvm/svm/svm.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+
+#include "private.h"
+
+#define pr_debug(x...) ((void)0)
+
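+/*
+ * An AMD container blob starts with a 4-byte magic, followed by an
+ * equivalence table section and one or more patch sections.  Every section
+ * carries a 32-bit type and a 32-bit length (SECTION_HDR_SIZE).
+ * CONT_HDR_SIZE covers the magic plus the equivalence table's section
+ * header, and PATCH_HDR_SIZE is the minimum length accepted for a patch
+ * section (see container_fast_forward()).
+ */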
+#define CONT_HDR_SIZE 12
+#define SECTION_HDR_SIZE 8
+#define PATCH_HDR_SIZE 32
+
+struct __packed equiv_cpu_entry {
+ uint32_t installed_cpu;
+ uint32_t fixed_errata_mask;
+ uint32_t fixed_errata_compare;
+ uint16_t equiv_cpu;
+ uint16_t reserved;
+};
+
+struct __packed microcode_header_amd {
+ uint32_t data_code;
+ uint32_t patch_id;
+ uint8_t mc_patch_data_id[2];
+ uint8_t mc_patch_data_len;
+ uint8_t init_flag;
+ uint32_t mc_patch_data_checksum;
+ uint32_t nb_dev_id;
+ uint32_t sb_dev_id;
+ uint16_t processor_rev_id;
+ uint8_t nb_rev_id;
+ uint8_t sb_rev_id;
+ uint8_t bios_api_rev;
+ uint8_t reserved1[3];
+ uint32_t match_reg[8];
+};
+
+#define UCODE_MAGIC 0x00414d44
+#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
+#define UCODE_UCODE_TYPE 0x00000001
+
+struct microcode_amd {
+ void *mpb;
+ size_t mpb_size;
+ struct equiv_cpu_entry *equiv_cpu_table;
+ size_t equiv_cpu_table_size;
+};
+
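+/*
+ * Generic section header used by the parsing code below: a 32-bit type,
+ * a 32-bit payload length, and the payload itself.
+ */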
+struct mpbhdr {
+ uint32_t type;
+ uint32_t len;
+ uint8_t data[];
+};
+
+/* See comment in start_update() for cases when this routine fails */
+static int collect_cpu_info(struct cpu_signature *csig)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_x86 *c = &cpu_data[cpu];
+
+ memset(csig, 0, sizeof(*csig));
+
+ if ( (c->x86_vendor != X86_VENDOR_AMD) || (c->x86 < 0x10) )
+ {
+ printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
+ cpu);
+ return -EINVAL;
+ }
+
+ rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
+
+ pr_debug("microcode: CPU%d collect_cpu_info: patch_id=%#x\n",
+ cpu, csig->rev);
+
+ return 0;
+}
+
+static bool_t verify_patch_size(uint32_t patch_size)
+{
+ uint32_t max_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
+
+ switch (boot_cpu_data.x86)
+ {
+ case 0x14:
+ max_size = F14H_MPB_MAX_SIZE;
+ break;
+ case 0x15:
+ max_size = F15H_MPB_MAX_SIZE;
+ break;
+ case 0x16:
+ max_size = F16H_MPB_MAX_SIZE;
+ break;
+ case 0x17:
+ max_size = F17H_MPB_MAX_SIZE;
+ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+ }
+
+ return (patch_size <= max_size);
+}
+
+static bool_t find_equiv_cpu_id(const struct equiv_cpu_entry *equiv_cpu_table,
+ unsigned int current_cpu_id,
+ unsigned int *equiv_cpu_id)
+{
+ unsigned int i;
+
+ if ( !equiv_cpu_table )
+ return 0;
+
+ for ( i = 0; equiv_cpu_table[i].installed_cpu != 0; i++ )
+ {
+ if ( current_cpu_id == equiv_cpu_table[i].installed_cpu )
+ {
+ *equiv_cpu_id = equiv_cpu_table[i].equiv_cpu & 0xffff;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static enum microcode_match_result microcode_fits(
+ const struct microcode_amd *mc_amd)
+{
+ unsigned int cpu = smp_processor_id();
+ const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
+ const struct microcode_header_amd *mc_header = mc_amd->mpb;
+ const struct equiv_cpu_entry *equiv_cpu_table = mc_amd->equiv_cpu_table;
+ unsigned int current_cpu_id;
+ unsigned int equiv_cpu_id;
+
+ current_cpu_id = cpuid_eax(0x00000001);
+
+ if ( !find_equiv_cpu_id(equiv_cpu_table, current_cpu_id, &equiv_cpu_id) )
+ return MIS_UCODE;
+
+ if ( (mc_header->processor_rev_id) != equiv_cpu_id )
+ return MIS_UCODE;
+
+ if ( !verify_patch_size(mc_amd->mpb_size) )
+ {
+ pr_debug("microcode: patch size mismatch\n");
+ return MIS_UCODE;
+ }
+
+ if ( mc_header->patch_id <= sig->rev )
+ {
+ pr_debug("microcode: patch is already at required level or greater.\n");
+ return OLD_UCODE;
+ }
+
+ pr_debug("microcode: CPU%d found a matching microcode update with version %#x (current=%#x)\n",
+ cpu, mc_header->patch_id, sig->rev);
+
+ return NEW_UCODE;
+}
+
+static bool match_cpu(const struct microcode_patch *patch)
+{
+ return patch && (microcode_fits(patch->mc_amd) == NEW_UCODE);
+}
+
+static void free_patch(void *mc)
+{
+ struct microcode_amd *mc_amd = mc;
+
+ if ( mc_amd )
+ {
+ xfree(mc_amd->equiv_cpu_table);
+ xfree(mc_amd->mpb);
+ xfree(mc_amd);
+ }
+}
+
+static enum microcode_match_result compare_header(
+ const struct microcode_header_amd *new_header,
+ const struct microcode_header_amd *old_header)
+{
+ if ( new_header->processor_rev_id == old_header->processor_rev_id )
+ return (new_header->patch_id > old_header->patch_id) ? NEW_UCODE
+ : OLD_UCODE;
+
+ return MIS_UCODE;
+}
+
+static enum microcode_match_result compare_patch(
+ const struct microcode_patch *new, const struct microcode_patch *old)
+{
+ const struct microcode_header_amd *new_header = new->mc_amd->mpb;
+ const struct microcode_header_amd *old_header = old->mc_amd->mpb;
+
+ /* Both patches to compare are supposed to be applicable to local CPU. */
+ ASSERT(microcode_fits(new->mc_amd) != MIS_UCODE);
+ ASSERT(microcode_fits(old->mc_amd) != MIS_UCODE);
+
+ return compare_header(new_header, old_header);
+}
+
+static int apply_microcode(const struct microcode_patch *patch)
+{
+ uint32_t rev;
+ int hw_err;
+ unsigned int cpu = smp_processor_id();
+ struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
+ const struct microcode_header_amd *hdr;
+
+ if ( !patch )
+ return -ENOENT;
+
+ if ( !match_cpu(patch) )
+ return -EINVAL;
+
+ hdr = patch->mc_amd->mpb;
+
+ BUG_ON(local_irq_is_enabled());
+
+ hw_err = wrmsr_safe(MSR_AMD_PATCHLOADER, (unsigned long)hdr);
+
+ /* get patch id after patching */
+ rdmsrl(MSR_AMD_PATCHLEVEL, rev);
+
+ /*
+ * Some processors leave the ucode blob mapping as UC after the update.
+ * Flush the mapping to regain normal cacheability.
+ */
+ flush_area_local(hdr, FLUSH_TLB_GLOBAL | FLUSH_ORDER(0));
+
+ /* check current patch id and patch's id for match */
+ if ( hw_err || (rev != hdr->patch_id) )
+ {
+ printk(KERN_ERR "microcode: CPU%d update from revision "
+ "%#x to %#x failed\n", cpu, rev, hdr->patch_id);
+ return -EIO;
+ }
+
+ printk(KERN_WARNING "microcode: CPU%d updated from revision %#x to %#x\n",
+ cpu, sig->rev, hdr->patch_id);
+
+ sig->rev = rev;
+
+ return 0;
+}
+
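+/*
+ * Copy the patch section at *offset into a freshly allocated mc_amd->mpb
+ * and advance *offset past the section.
+ */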
+static int get_ucode_from_buffer_amd(
+ struct microcode_amd *mc_amd,
+ const void *buf,
+ size_t bufsize,
+ size_t *offset)
+{
+ const struct mpbhdr *mpbuf = buf + *offset;
+
+ /* No more data */
+ if ( *offset >= bufsize )
+ {
+ printk(KERN_ERR "microcode: Microcode buffer overrun\n");
+ return -EINVAL;
+ }
+
+ if ( mpbuf->type != UCODE_UCODE_TYPE )
+ {
+ printk(KERN_ERR "microcode: Wrong microcode payload type field\n");
+ return -EINVAL;
+ }
+
+ if ( (*offset + mpbuf->len) > bufsize )
+ {
+ printk(KERN_ERR "microcode: Bad data in microcode data file\n");
+ return -EINVAL;
+ }
+
+ mc_amd->mpb = xmalloc_bytes(mpbuf->len);
+ if ( !mc_amd->mpb )
+ return -ENOMEM;
+ mc_amd->mpb_size = mpbuf->len;
+ memcpy(mc_amd->mpb, mpbuf->data, mpbuf->len);
+
+ pr_debug("microcode: CPU%d size %zu, block size %u offset %zu equivID %#x rev %#x\n",
+ raw_smp_processor_id(), bufsize, mpbuf->len, *offset,
+ ((struct microcode_header_amd *)mc_amd->mpb)->processor_rev_id,
+ ((struct microcode_header_amd *)mc_amd->mpb)->patch_id);
+
+ *offset += mpbuf->len + SECTION_HDR_SIZE;
+
+ return 0;
+}
+
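+/*
+ * Copy the equivalence table section following the 4-byte container magic
+ * into mc_amd, advancing *offset past the table and container header.
+ */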
+static int install_equiv_cpu_table(
+ struct microcode_amd *mc_amd,
+ const void *data,
+ size_t *offset)
+{
+ const struct mpbhdr *mpbuf = data + *offset + 4;
+
+ *offset += mpbuf->len + CONT_HDR_SIZE; /* add header length */
+
+ if ( mpbuf->type != UCODE_EQUIV_CPU_TABLE_TYPE )
+ {
+ printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table type field\n");
+ return -EINVAL;
+ }
+
+ if ( mpbuf->len == 0 )
+ {
+ printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table length\n");
+ return -EINVAL;
+ }
+
+ mc_amd->equiv_cpu_table = xmalloc_bytes(mpbuf->len);
+ if ( !mc_amd->equiv_cpu_table )
+ {
+ printk(KERN_ERR "microcode: Cannot allocate memory for equivalent cpu table\n");
+ return -ENOMEM;
+ }
+
+ memcpy(mc_amd->equiv_cpu_table, mpbuf->data, mpbuf->len);
+ mc_amd->equiv_cpu_table_size = mpbuf->len;
+
+ return 0;
+}
+
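+/*
+ * Skip the remaining patch sections of the current container until the
+ * start of the next container (UCODE_MAGIC followed by an equivalence
+ * table section) is found.  Returns -ENODATA if the end of the blob is
+ * reached without finding another container.
+ */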
+static int container_fast_forward(const void *data, size_t size_left, size_t *offset)
+{
+ for ( ; ; )
+ {
+ size_t size;
+ const uint32_t *header;
+
+ if ( size_left < SECTION_HDR_SIZE )
+ return -EINVAL;
+
+ header = data + *offset;
+
+ if ( header[0] == UCODE_MAGIC &&
+ header[1] == UCODE_EQUIV_CPU_TABLE_TYPE )
+ break;
+
+ if ( header[0] != UCODE_UCODE_TYPE )
+ return -EINVAL;
+ size = header[1] + SECTION_HDR_SIZE;
+ if ( size < PATCH_HDR_SIZE || size_left < size )
+ return -EINVAL;
+
+ size_left -= size;
+ *offset += size;
+
+ if ( !size_left )
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+/*
+ * The 'final_levels' of patch ids have been obtained empirically.
+ * Refer to bug https://bugzilla.suse.com/show_bug.cgi?id=913996
+ * for details of the issue. The short version is that people
+ * using certain Fam10h systems noticed system hang issues when
+ * trying to update microcode levels beyond the patch IDs below.
+ * From internal discussions, we gathered that OS/hypervisor
+ * cannot reliably perform microcode updates beyond these levels
+ * due to hardware issues. Therefore, we need to abort the microcode
+ * update process if we hit any of these levels.
+ */
+static const unsigned int final_levels[] = {
+ 0x01000098,
+ 0x0100009f,
+ 0x010000af
+};
+
+static bool_t check_final_patch_levels(unsigned int cpu)
+{
+ /*
+ * Check the current patch levels on the cpu. If they are equal to
+ * any of the 'final_levels', then we should not update the microcode
+ * patch on the cpu as system will hang otherwise.
+ */
+ const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
+ unsigned int i;
+
+ if ( boot_cpu_data.x86 != 0x10 )
+ return 0;
+
+ for ( i = 0; i < ARRAY_SIZE(final_levels); i++ )
+ if ( sig->rev == final_levels[i] )
+ return 1;
+
+ return 0;
+}
+
+static struct microcode_patch *cpu_request_microcode(const void *buf,
+ size_t bufsize)
+{
+ struct microcode_amd *mc_amd;
+ struct microcode_header_amd *saved = NULL;
+ struct microcode_patch *patch = NULL;
+ size_t offset = 0, saved_size = 0;
+ int error = 0;
+ unsigned int current_cpu_id;
+ unsigned int equiv_cpu_id;
+ unsigned int cpu = smp_processor_id();
+ const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
+
+ current_cpu_id = cpuid_eax(0x00000001);
+
+ if ( *(const uint32_t *)buf != UCODE_MAGIC )
+ {
+ printk(KERN_ERR "microcode: Wrong microcode patch file magic\n");
+ error = -EINVAL;
+ goto out;
+ }
+
+ if ( check_final_patch_levels(cpu) )
+ {
+ printk(XENLOG_INFO
+ "microcode: Cannot update microcode patch on the cpu as we hit a final level\n");
+ error = -EPERM;
+ goto out;
+ }
+
+ mc_amd = xzalloc(struct microcode_amd);
+ if ( !mc_amd )
+ {
+ printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Multiple container file support:
+ * 1. check if this container file has equiv_cpu_id match
+ * 2. If not, fast-fwd to next container file
+ */
+ while ( offset < bufsize )
+ {
+ error = install_equiv_cpu_table(mc_amd, buf, &offset);
+ if ( error )
+ {
+ printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
+ break;
+ }
+
+ /*
+ * Could happen as we advance 'offset' early
+ * in install_equiv_cpu_table
+ */
+ if ( offset > bufsize )
+ {
+ printk(KERN_ERR "microcode: Microcode buffer overrun\n");
+ error = -EINVAL;
+ break;
+ }
+
+ if ( find_equiv_cpu_id(mc_amd->equiv_cpu_table, current_cpu_id,
+ &equiv_cpu_id) )
+ break;
+
+ error = container_fast_forward(buf, bufsize - offset, &offset);
+ if ( error == -ENODATA )
+ {
+ ASSERT(offset == bufsize);
+ break;
+ }
+ if ( error )
+ {
+ printk(KERN_ERR "microcode: CPU%d incorrect or corrupt container file\n"
+ "microcode: Failed to update patch level. "
+ "Current lvl:%#x\n", cpu, sig->rev);
+ break;
+ }
+ }
+
+ if ( error )
+ {
+ /*
+ * -ENODATA here means that the blob was parsed fine but no matching
+ * ucode was found. Don't return it to the caller.
+ */
+ if ( error == -ENODATA )
+ error = 0;
+
+ xfree(mc_amd->equiv_cpu_table);
+ xfree(mc_amd);
+ goto out;
+ }
+
+ /*
+ * It's possible the data file has multiple matching ucode updates;
+ * let's keep searching until we find the latest version.
+ */
+ while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
+ &offset)) == 0 )
+ {
+ /*
+ * If the new ucode covers current CPU, compare ucodes and store the
+ * one with higher revision.
+ */
+ if ( (microcode_fits(mc_amd) != MIS_UCODE) &&
+ (!saved || (compare_header(mc_amd->mpb, saved) == NEW_UCODE)) )
+ {
+ xfree(saved);
+ saved = mc_amd->mpb;
+ saved_size = mc_amd->mpb_size;
+ }
+ else
+ {
+ xfree(mc_amd->mpb);
+ mc_amd->mpb = NULL;
+ }
+
+ if ( offset >= bufsize )
+ break;
+
+ /*
+ * 1. Given a situation where multiple containers exist and correct
+ * patch lives on a container that is not the last container.
+ * 2. We match equivalent ids using find_equiv_cpu_id() from the
+ * earlier while() (On this case, matches on earlier container
+ * file and we break)
+ * 3. Proceed to while ( (error = get_ucode_from_buffer_amd(mc_amd,
+ * buf, bufsize,&offset)) == 0 )
+ * 4. Find correct patch using microcode_fits() and apply the patch
+ * (Assume: apply_microcode() is successful)
+ * 5. The while() loop from (3) continues to parse the binary as
+ * there is a subsequent container file, but...
+ * 6. ...a correct patch can only be on one container and not on any
+ * subsequent ones. (Refer docs for more info) Therefore, we
+ * don't have to parse a subsequent container. So, we can abort
+ * the process here.
+ * 7. This ensures that we retain a success value (= 0) to 'error'
+ * before if ( mpbuf->type != UCODE_UCODE_TYPE ) evaluates to
+ * false and returns -EINVAL.
+ */
+ if ( offset + SECTION_HDR_SIZE <= bufsize &&
+ *(const uint32_t *)(buf + offset) == UCODE_MAGIC )
+ break;
+ }
+
+ if ( saved )
+ {
+ mc_amd->mpb = saved;
+ mc_amd->mpb_size = saved_size;
+ patch = xmalloc(struct microcode_patch);
+ if ( patch )
+ patch->mc_amd = mc_amd;
+ else
+ {
+ free_patch(mc_amd);
+ error = -ENOMEM;
+ }
+ }
+ else
+ free_patch(mc_amd);
+
+ out:
+ if ( error && !patch )
+ patch = ERR_PTR(error);
+
+ return patch;
+}
+
+#ifdef CONFIG_HVM
+static int start_update(void)
+{
+ /*
+ * svm_host_osvw_init() will be called on each CPU via the
+ * '.end_update_percpu' hook in common code.
+ */
+ svm_host_osvw_reset();
+
+ return 0;
+}
+#endif
+
+static const struct microcode_ops microcode_amd_ops = {
+ .cpu_request_microcode = cpu_request_microcode,
+ .collect_cpu_info = collect_cpu_info,
+ .apply_microcode = apply_microcode,
+#ifdef CONFIG_HVM
+ .start_update = start_update,
+ .end_update_percpu = svm_host_osvw_init,
+#endif
+ .free_patch = free_patch,
+ .compare_patch = compare_patch,
+ .match_cpu = match_cpu,
+};
+
+int __init microcode_init_amd(void)
+{
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ microcode_ops = µcode_amd_ops;
+ return 0;
+}
--- /dev/null
+/*
+ * Intel CPU Microcode Update Driver for Linux
+ *
+ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *               2006      Shaohua Li <shaohua.li@intel.com>
+ *
+ * This driver allows updating microcode on Intel processors
+ * belonging to IA-32 family - PentiumPro, Pentium II,
+ * Pentium III, Xeon, Pentium 4, etc.
+ *
+ * Reference: Section 8.11 of Volume 3a, IA-32 Intel(R) Architecture
+ * Software Developer's Manual
+ * Order Number 253668 or free download from:
+ *
+ * http://developer.intel.com/design/pentium4/manuals/253668.htm
+ *
+ * For more information, go to http://www.urbanmyth.org/microcode
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <xen/cpu.h>
+#include <xen/earlycpio.h>
+#include <xen/err.h>
+#include <xen/guest_access.h>
+#include <xen/init.h>
+#include <xen/param.h>
+#include <xen/spinlock.h>
+#include <xen/stop_machine.h>
+#include <xen/watchdog.h>
+
+#include <asm/apic.h>
+#include <asm/delay.h>
+#include <asm/nmi.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+
+#include "private.h"
+
+/*
+ * Before performing a late microcode update on any thread, we
+ * rendezvous all cpus in stop_machine context. The timeout for
+ * waiting for cpu rendezvous is 30ms. It is the timeout used by
+ * live patching
+ */
+#define MICROCODE_CALLIN_TIMEOUT_US 30000
+
+/*
+ * Timeout for each thread to complete update is set to 1s. It is a
+ * conservative choice considering all possible interference.
+ */
+#define MICROCODE_UPDATE_TIMEOUT_US 1000000
+
+static module_t __initdata ucode_mod;
+static signed int __initdata ucode_mod_idx;
+static bool_t __initdata ucode_mod_forced;
+static unsigned int nr_cores;
+
+/*
+ * These states help to coordinate CPUs during loading an update.
+ *
+ * The semantics of each state is as follow:
+ * - LOADING_PREPARE: initial state of 'loading_state'.
+ * - LOADING_CALLIN: CPUs are allowed to callin.
+ * - LOADING_ENTER: all CPUs have called in. Initiate ucode loading.
+ * - LOADING_EXIT: ucode loading is done or aborted.
+ */
+static enum {
+ LOADING_PREPARE,
+ LOADING_CALLIN,
+ LOADING_ENTER,
+ LOADING_EXIT,
+} loading_state;
+
+/*
+ * If we scan the initramfs.cpio for the early microcode
+ * and find it, then 'ucode_blob' will contain the pointer
+ * and the size of said blob. It is allocated from Xen's heap
+ * memory.
+ */
+struct ucode_mod_blob {
+ const void *data;
+ size_t size;
+};
+
+static struct ucode_mod_blob __initdata ucode_blob;
+/*
+ * By default we will NOT parse the multiboot modules to see if there is
+ * cpio image with the microcode images.
+ */
+static bool_t __initdata ucode_scan;
+
+/* By default, ucode loading is done in NMI handler */
+static bool ucode_in_nmi = true;
+
+/* Protected by microcode_mutex */
+static struct microcode_patch *microcode_cache;
+
+void __init microcode_set_module(unsigned int idx)
+{
+ ucode_mod_idx = idx;
+ ucode_mod_forced = 1;
+}
+
+/*
+ * The format is '[<integer>|scan=<bool>, nmi=<bool>]'. Both options are
+ * optional. If the EFI has forced which of the multiboot payloads is to be
+ * used, only nmi=<bool> is parsed.
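+ *
+ * For example, "ucode=scan,nmi=0" scans the multiboot modules for a
+ * microcode blob and applies it outside of the NMI handler, while
+ * "ucode=2" selects multiboot module 2 as the blob.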
+ */
+static int __init parse_ucode(const char *s)
+{
+ const char *ss;
+ int val, rc = 0;
+
+ do {
+ ss = strchr(s, ',');
+ if ( !ss )
+ ss = strchr(s, '\0');
+
+ if ( (val = parse_boolean("nmi", s, ss)) >= 0 )
+ ucode_in_nmi = val;
+ else if ( !ucode_mod_forced ) /* Not forced by EFI */
+ {
+ if ( (val = parse_boolean("scan", s, ss)) >= 0 )
+ ucode_scan = val;
+ else
+ {
+ const char *q;
+
+ ucode_mod_idx = simple_strtol(s, &q, 0);
+ if ( q != ss )
+ rc = -EINVAL;
+ }
+ }
+
+ s = ss + 1;
+ } while ( *ss );
+
+ return rc;
+}
+custom_param("ucode", parse_ucode);
+
+void __init microcode_scan_module(
+ unsigned long *module_map,
+ const multiboot_info_t *mbi)
+{
+ module_t *mod = (module_t *)__va(mbi->mods_addr);
+ uint64_t *_blob_start;
+ unsigned long _blob_size;
+ struct cpio_data cd;
+ long offset;
+ const char *p = NULL;
+ int i;
+
+ ucode_blob.size = 0;
+ if ( !ucode_scan )
+ return;
+
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ p = "kernel/x86/microcode/AuthenticAMD.bin";
+ else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ p = "kernel/x86/microcode/GenuineIntel.bin";
+ else
+ return;
+
+ /*
+ * Try all modules and see which one could be the microcode blob.
+ */
+ for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ )
+ {
+ if ( !test_bit(i, module_map) )
+ continue;
+
+ _blob_start = bootstrap_map(&mod[i]);
+ _blob_size = mod[i].mod_end;
+ if ( !_blob_start )
+ {
+ printk("Could not map multiboot module #%d (size: %ld)\n",
+ i, _blob_size);
+ continue;
+ }
+ cd.data = NULL;
+ cd.size = 0;
+ cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */);
+ if ( cd.data )
+ {
+ ucode_blob.size = cd.size;
+ ucode_blob.data = cd.data;
+ break;
+ }
+ bootstrap_map(NULL);
+ }
+}
+
+void __init microcode_grab_module(
+ unsigned long *module_map,
+ const multiboot_info_t *mbi)
+{
+ module_t *mod = (module_t *)__va(mbi->mods_addr);
+
+ if ( ucode_mod_idx < 0 )
+ ucode_mod_idx += mbi->mods_count;
+ if ( ucode_mod_idx <= 0 || ucode_mod_idx >= mbi->mods_count ||
+ !__test_and_clear_bit(ucode_mod_idx, module_map) )
+ goto scan;
+ ucode_mod = mod[ucode_mod_idx];
+scan:
+ if ( ucode_scan )
+ microcode_scan_module(module_map, mbi);
+}
+
+const struct microcode_ops *microcode_ops;
+
+static DEFINE_SPINLOCK(microcode_mutex);
+
+DEFINE_PER_CPU(struct cpu_signature, cpu_sig);
+/* Store error code of the work done in NMI handler */
+static DEFINE_PER_CPU(int, loading_err);
+
+/*
+ * Counts of CPUs that have entered the rendezvous, exited it, and succeeded
+ * in updating microcode during a late update, respectively.
+ *
+ * Note that a bitmap is used for callin so that a CPU can set its bit
+ * multiple times; this is required for the busy-loop in #NMI handling.
+ */
+static cpumask_t cpu_callin_map;
+static atomic_t cpu_out, cpu_updated;
+static const struct microcode_patch *nmi_patch = ZERO_BLOCK_PTR;
+
+/*
+ * Return a patch that covers the current CPU. If there are multiple patches,
+ * return the one with the highest revision number. Return an error if no
+ * patch is found and an error occurred during the parsing process. Otherwise
+ * return NULL.
+ */
+static struct microcode_patch *parse_blob(const char *buf, size_t len)
+{
+ if ( likely(!microcode_ops->collect_cpu_info(&this_cpu(cpu_sig))) )
+ return microcode_ops->cpu_request_microcode(buf, len);
+
+ return NULL;
+}
+
+static void microcode_free_patch(struct microcode_patch *microcode_patch)
+{
+ microcode_ops->free_patch(microcode_patch->mc);
+ xfree(microcode_patch);
+}
+
+/* Return true if cache gets updated. Otherwise, return false */
+static bool microcode_update_cache(struct microcode_patch *patch)
+{
+ ASSERT(spin_is_locked(µcode_mutex));
+
+ if ( !microcode_cache )
+ microcode_cache = patch;
+ else if ( microcode_ops->compare_patch(patch,
+ microcode_cache) == NEW_UCODE )
+ {
+ microcode_free_patch(microcode_cache);
+ microcode_cache = patch;
+ }
+ else
+ {
+ microcode_free_patch(patch);
+ return false;
+ }
+
+ return true;
+}
+
+/* Wait for a condition to be met with a timeout (us). */
+static int wait_for_condition(bool (*func)(unsigned int data),
+ unsigned int data, unsigned int timeout)
+{
+ while ( !func(data) )
+ {
+ if ( !timeout-- )
+ {
+ printk("CPU%u: Timeout in %pS\n",
+ smp_processor_id(), __builtin_return_address(0));
+ return -EBUSY;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static bool wait_cpu_callin(unsigned int nr)
+{
+ return cpumask_weight(&cpu_callin_map) >= nr;
+}
+
+static bool wait_cpu_callout(unsigned int nr)
+{
+ return atomic_read(&cpu_out) >= nr;
+}
+
+/*
+ * Load a microcode update to current CPU.
+ *
+ * If no patch is provided, the cached patch will be loaded. Microcode updates
+ * during AP bringup and CPU resume fall into this case.
+ */
+static int microcode_update_cpu(const struct microcode_patch *patch)
+{
+ int err = microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
+
+ if ( unlikely(err) )
+ return err;
+
+ spin_lock(µcode_mutex);
+ if ( patch )
+ err = microcode_ops->apply_microcode(patch);
+ else if ( microcode_cache )
+ {
+ err = microcode_ops->apply_microcode(microcode_cache);
+ if ( err == -EIO )
+ {
+ microcode_free_patch(microcode_cache);
+ microcode_cache = NULL;
+ }
+ }
+ else
+ /* No patch to update */
+ err = -ENOENT;
+ spin_unlock(µcode_mutex);
+
+ return err;
+}
+
+static bool wait_for_state(typeof(loading_state) state)
+{
+ typeof(loading_state) cur_state;
+
+ while ( (cur_state = ACCESS_ONCE(loading_state)) != state )
+ {
+ if ( cur_state == LOADING_EXIT )
+ return false;
+ cpu_relax();
+ }
+
+ return true;
+}
+
+static void set_state(typeof(loading_state) state)
+{
+ ACCESS_ONCE(loading_state) = state;
+}
+
+static int secondary_nmi_work(void)
+{
+ cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
+
+ return wait_for_state(LOADING_EXIT) ? 0 : -EBUSY;
+}
+
+static int primary_thread_work(const struct microcode_patch *patch)
+{
+ int ret;
+
+ cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
+
+ if ( !wait_for_state(LOADING_ENTER) )
+ return -EBUSY;
+
+ ret = microcode_ops->apply_microcode(patch);
+ if ( !ret )
+ atomic_inc(&cpu_updated);
+ atomic_inc(&cpu_out);
+
+ return ret;
+}
+
+static int microcode_nmi_callback(const struct cpu_user_regs *regs, int cpu)
+{
+ unsigned int primary = cpumask_first(this_cpu(cpu_sibling_mask));
+ int ret;
+
+ /* System-generated NMI, leave to main handler */
+ if ( ACCESS_ONCE(loading_state) != LOADING_CALLIN )
+ return 0;
+
+ /*
+ * Primary threads load ucode in the NMI handler only if ucode_in_nmi is true.
+ * Secondary threads are expected to stay in NMI handler regardless of
+ * ucode_in_nmi.
+ */
+ if ( cpu == cpumask_first(&cpu_online_map) ||
+ (!ucode_in_nmi && cpu == primary) )
+ return 0;
+
+ if ( cpu == primary )
+ ret = primary_thread_work(nmi_patch);
+ else
+ ret = secondary_nmi_work();
+ this_cpu(loading_err) = ret;
+
+ return 0;
+}
+
+static int secondary_thread_fn(void)
+{
+ if ( !wait_for_state(LOADING_CALLIN) )
+ return -EBUSY;
+
+ self_nmi();
+
+ /*
+ * Wait until ucode loading is done, in case the NMI does not arrive
+ * synchronously; otherwise a not-yet-updated CPU signature might be
+ * copied below.
+ */
+ if ( unlikely(!wait_for_state(LOADING_EXIT)) )
+ ASSERT_UNREACHABLE();
+
+ /* Copy update revision from the primary thread. */
+ this_cpu(cpu_sig).rev =
+ per_cpu(cpu_sig, cpumask_first(this_cpu(cpu_sibling_mask))).rev;
+
+ return this_cpu(loading_err);
+}
+
+static int primary_thread_fn(const struct microcode_patch *patch)
+{
+ if ( !wait_for_state(LOADING_CALLIN) )
+ return -EBUSY;
+
+ if ( ucode_in_nmi )
+ {
+ self_nmi();
+
+ /*
+ * Wait until ucode loading is done, in case the NMI does not arrive
+ * synchronously; otherwise a stale error code might be returned
+ * below.
+ */
+ if ( unlikely(!wait_for_state(LOADING_EXIT)) )
+ ASSERT_UNREACHABLE();
+
+ return this_cpu(loading_err);
+ }
+
+ return primary_thread_work(patch);
+}
+
+static int control_thread_fn(const struct microcode_patch *patch)
+{
+ unsigned int cpu = smp_processor_id(), done;
+ unsigned long tick;
+ int ret;
+ nmi_callback_t *saved_nmi_callback;
+
+ /*
+ * We intend to keep interrupts disabled for a long time, which may lead
+ * to a watchdog timeout.
+ */
+ watchdog_disable();
+
+ nmi_patch = patch;
+ smp_wmb();
+ saved_nmi_callback = set_nmi_callback(microcode_nmi_callback);
+
+ /* Allow threads to call in */
+ set_state(LOADING_CALLIN);
+
+ cpumask_set_cpu(cpu, &cpu_callin_map);
+
+ /* Wait for all threads to call in */
+ ret = wait_for_condition(wait_cpu_callin, num_online_cpus(),
+ MICROCODE_CALLIN_TIMEOUT_US);
+ if ( ret )
+ {
+ set_state(LOADING_EXIT);
+ return ret;
+ }
+
+ /* Control thread loads ucode first while others are in NMI handler. */
+ ret = microcode_ops->apply_microcode(patch);
+ if ( !ret )
+ atomic_inc(&cpu_updated);
+ atomic_inc(&cpu_out);
+
+ if ( ret == -EIO )
+ {
+ printk(XENLOG_ERR
+ "Late loading aborted: CPU%u failed to update ucode\n", cpu);
+ set_state(LOADING_EXIT);
+ return ret;
+ }
+
+ /* Let primary threads load the given ucode update */
+ set_state(LOADING_ENTER);
+
+ tick = rdtsc_ordered();
+ /* Wait for primary threads to finish the update */
+ while ( (done = atomic_read(&cpu_out)) != nr_cores )
+ {
+ /*
+ * During each timeout interval, at least one CPU is expected to
+ * finish its update. Otherwise, something has gone wrong.
+ *
+ * Note that RDTSC (in wait_for_condition()) is safe for threads to
+ * execute while waiting for completion of loading an update.
+ */
+ if ( wait_for_condition(wait_cpu_callout, (done + 1),
+ MICROCODE_UPDATE_TIMEOUT_US) )
+ panic("Timeout when finished updating microcode (finished %u/%u)",
+ done, nr_cores);
+
+ /* Print warning message once if long time is spent here */
+ if ( tick && rdtsc_ordered() - tick >= cpu_khz * 1000 )
+ {
+ printk(XENLOG_WARNING
+ "WARNING: UPDATING MICROCODE HAS CONSUMED MORE THAN 1 SECOND!\n");
+ tick = 0;
+ }
+ }
+
+ /* Mark loading is done to unblock other threads */
+ set_state(LOADING_EXIT);
+
+ set_nmi_callback(saved_nmi_callback);
+ smp_wmb();
+ nmi_patch = ZERO_BLOCK_PTR;
+
+ watchdog_enable();
+
+ return ret;
+}
+
+static int do_microcode_update(void *patch)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret;
+
+ /*
+ * The control thread sets the state to coordinate ucode loading. Primary
+ * threads load the given ucode patch. Secondary threads just wait for
+ * the completion of the ucode loading process.
+ */
+ if ( cpu == cpumask_first(&cpu_online_map) )
+ ret = control_thread_fn(patch);
+ else if ( cpu == cpumask_first(this_cpu(cpu_sibling_mask)) )
+ ret = primary_thread_fn(patch);
+ else
+ ret = secondary_thread_fn();
+
+ if ( microcode_ops->end_update_percpu )
+ microcode_ops->end_update_percpu();
+
+ return ret;
+}
+
+struct ucode_buf {
+ unsigned int len;
+ char buffer[];
+};
+
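+/*
+ * Late-load worker, invoked via continue_hypercall_on_cpu(): parse the
+ * provided blob, reject it unless it is newer than any cached patch, then
+ * rendezvous all online CPUs in stop_machine context to apply it.
+ */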
+static long microcode_update_helper(void *data)
+{
+ int ret;
+ struct ucode_buf *buffer = data;
+ unsigned int cpu, updated;
+ struct microcode_patch *patch;
+
+ /* cpu_online_map must not change during update */
+ if ( !get_cpu_maps() )
+ {
+ xfree(buffer);
+ return -EBUSY;
+ }
+
+ /*
+ * All CPUs except the first online CPU send a fake (self) NMI to
+ * rendezvous in the NMI handler. But a fake NMI sent to nmi_cpu may
+ * trigger unknown_nmi_error(). This check ensures nmi_cpu won't receive one.
+ */
+ if ( unlikely(cpumask_first(&cpu_online_map) != nmi_cpu) )
+ {
+ xfree(buffer);
+ printk(XENLOG_WARNING
+ "CPU%u is expected to lead ucode loading (but got CPU%u)\n",
+ nmi_cpu, cpumask_first(&cpu_online_map));
+ return -EPERM;
+ }
+
+ patch = parse_blob(buffer->buffer, buffer->len);
+ xfree(buffer);
+ if ( IS_ERR(patch) )
+ {
+ ret = PTR_ERR(patch);
+ printk(XENLOG_WARNING "Parsing microcode blob error %d\n", ret);
+ goto put;
+ }
+
+ if ( !patch )
+ {
+ printk(XENLOG_WARNING "microcode: couldn't find any matching ucode in "
+ "the provided blob!\n");
+ ret = -ENOENT;
+ goto put;
+ }
+
+ /*
+ * If microcode_cache exists, all CPUs in the system should have at least
+ * that ucode revision.
+ */
+ spin_lock(µcode_mutex);
+ if ( microcode_cache &&
+ microcode_ops->compare_patch(patch, microcode_cache) != NEW_UCODE )
+ {
+ spin_unlock(µcode_mutex);
+ printk(XENLOG_WARNING "microcode: couldn't find any newer revision "
+ "in the provided blob!\n");
+ microcode_free_patch(patch);
+ ret = -ENOENT;
+
+ goto put;
+ }
+ spin_unlock(µcode_mutex);
+
+ if ( microcode_ops->start_update )
+ {
+ ret = microcode_ops->start_update();
+ if ( ret )
+ {
+ microcode_free_patch(patch);
+ goto put;
+ }
+ }
+
+ cpumask_clear(&cpu_callin_map);
+ atomic_set(&cpu_out, 0);
+ atomic_set(&cpu_updated, 0);
+ loading_state = LOADING_PREPARE;
+
+ /* Calculate the number of online CPU core */
+ nr_cores = 0;
+ for_each_online_cpu(cpu)
+ if ( cpu == cpumask_first(per_cpu(cpu_sibling_mask, cpu)) )
+ nr_cores++;
+
+ printk(XENLOG_INFO "%u cores are to update their microcode\n", nr_cores);
+
+ /*
+ * Late loading dance. Why the heavy-handed stop_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other
+ * sibling is loading microcode in order to avoid any negative
+ *   interactions caused by the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until
+ * this requirement can be relaxed in the future. Right now, this is
+ * conservative and good.
+ */
+ ret = stop_machine_run(do_microcode_update, patch, NR_CPUS);
+
+ updated = atomic_read(&cpu_updated);
+ if ( updated > 0 )
+ {
+ spin_lock(µcode_mutex);
+ microcode_update_cache(patch);
+ spin_unlock(µcode_mutex);
+ }
+ else
+ microcode_free_patch(patch);
+
+ if ( updated && updated != nr_cores )
+ printk(XENLOG_ERR "ERROR: Updating microcode succeeded on %u cores and failed\n"
+ XENLOG_ERR "on other %u cores. A system with differing microcode\n"
+ XENLOG_ERR "revisions is considered unstable. Please reboot and do not\n"
+ XENLOG_ERR "load the microcode that triggers this warning!\n",
+ updated, nr_cores - updated);
+
+ put:
+ put_cpu_maps();
+ return ret;
+}
+
+int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
+{
+ int ret;
+ struct ucode_buf *buffer;
+
+ if ( len != (uint32_t)len )
+ return -E2BIG;
+
+ if ( microcode_ops == NULL )
+ return -EINVAL;
+
+ buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
+ if ( !buffer )
+ return -ENOMEM;
+
+ ret = copy_from_guest(buffer->buffer, buf, len);
+ if ( ret )
+ {
+ xfree(buffer);
+ return -EFAULT;
+ }
+ buffer->len = len;
+
+ return continue_hypercall_on_cpu(smp_processor_id(),
+ microcode_update_helper, buffer);
+}
+
+static int __init microcode_init(void)
+{
+ /*
+ * At this point, all CPUs should have updated their microcode
+ * via the early_microcode_* paths so free the microcode blob.
+ */
+ if ( ucode_blob.size )
+ {
+ bootstrap_map(NULL);
+ ucode_blob.size = 0;
+ ucode_blob.data = NULL;
+ }
+ else if ( ucode_mod.mod_end )
+ {
+ bootstrap_map(NULL);
+ ucode_mod.mod_end = 0;
+ }
+
+ return 0;
+}
+__initcall(microcode_init);
+
+/* Load a cached update to current cpu */
+int microcode_update_one(bool start_update)
+{
+ int err;
+
+ if ( !microcode_ops )
+ return -EOPNOTSUPP;
+
+ microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
+
+ if ( start_update && microcode_ops->start_update )
+ {
+ err = microcode_ops->start_update();
+ if ( err )
+ return err;
+ }
+
+ err = microcode_update_cpu(NULL);
+
+ if ( microcode_ops->end_update_percpu )
+ microcode_ops->end_update_percpu();
+
+ return err;
+}
+
+/* BSP calls this function to parse ucode blob and then apply an update. */
+static int __init early_microcode_update_cpu(void)
+{
+ int rc = 0;
+ const void *data = NULL;
+ size_t len;
+ struct microcode_patch *patch;
+
+ if ( ucode_blob.size )
+ {
+ len = ucode_blob.size;
+ data = ucode_blob.data;
+ }
+ else if ( ucode_mod.mod_end )
+ {
+ len = ucode_mod.mod_end;
+ data = bootstrap_map(&ucode_mod);
+ }
+
+ if ( !data )
+ return -ENOMEM;
+
+ patch = parse_blob(data, len);
+ if ( IS_ERR(patch) )
+ {
+ printk(XENLOG_WARNING "Parsing microcode blob error %ld\n",
+ PTR_ERR(patch));
+ return PTR_ERR(patch);
+ }
+
+ if ( !patch )
+ return -ENOENT;
+
+ spin_lock(µcode_mutex);
+ rc = microcode_update_cache(patch);
+ spin_unlock(µcode_mutex);
+ ASSERT(rc);
+
+ return microcode_update_one(true);
+}
+
+int __init early_microcode_init(void)
+{
+ int rc;
+
+ rc = microcode_init_intel();
+ if ( rc )
+ return rc;
+
+ rc = microcode_init_amd();
+ if ( rc )
+ return rc;
+
+ if ( microcode_ops )
+ {
+ microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
+
+ if ( ucode_mod.mod_end || ucode_blob.size )
+ rc = early_microcode_update_cpu();
+ }
+
+ return rc;
+}
--- /dev/null
+/*
+ * Intel CPU Microcode Update Driver for Linux
+ *
+ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *               2006      Shaohua Li <shaohua.li@intel.com>
+ *
+ * This driver allows updating microcode on Intel processors
+ * belonging to IA-32 family - PentiumPro, Pentium II,
+ * Pentium III, Xeon, Pentium 4, etc.
+ *
+ * Reference: Section 8.11 of Volume 3a, IA-32 Intel(R) Architecture
+ * Software Developer's Manual
+ * Order Number 253668 or free download from:
+ *
+ * http://developer.intel.com/design/pentium4/manuals/253668.htm
+ *
+ * For more information, go to http://www.urbanmyth.org/microcode
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <xen/err.h>
+#include <xen/init.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+
+#include "private.h"
+
+#define pr_debug(x...) ((void)0)
+
+struct microcode_header_intel {
+ unsigned int hdrver;
+ unsigned int rev;
+ union {
+ struct {
+ uint16_t year;
+ uint8_t day;
+ uint8_t month;
+ };
+ unsigned int date;
+ };
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode_intel {
+ struct microcode_header_intel hdr;
+ unsigned int bits[0];
+};
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
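+/*
+ * Per the SDM, a zero data size denotes the original update format of
+ * 2000 data bytes; get_datasize()/get_totalsize() below fall back to the
+ * corresponding defaults.
+ */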
+#define DEFAULT_UCODE_DATASIZE (2000)
+#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
+#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
+#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
+#define DWSIZE (sizeof(u32))
+#define get_totalsize(mc) \
+ (((struct microcode_intel *)mc)->hdr.totalsize ? \
+ ((struct microcode_intel *)mc)->hdr.totalsize : \
+ DEFAULT_UCODE_TOTALSIZE)
+
+#define get_datasize(mc) \
+ (((struct microcode_intel *)mc)->hdr.datasize ? \
+ ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
+
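+/*
+ * Signatures match when the CPUID values are equal and the platform flag
+ * masks share a bit (or both are zero, i.e. no platform restriction).
+ */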
+#define sigmatch(s1, s2, p1, p2) \
+ (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
+
+#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+
+static int collect_cpu_info(struct cpu_signature *csig)
+{
+ unsigned int cpu_num = smp_processor_id();
+ struct cpuinfo_x86 *c = &cpu_data[cpu_num];
+ uint64_t msr_content;
+
+ memset(csig, 0, sizeof(*csig));
+
+ if ( (c->x86_vendor != X86_VENDOR_INTEL) || (c->x86 < 6) )
+ {
+ printk(KERN_ERR "microcode: CPU%d not a capable Intel "
+ "processor\n", cpu_num);
+ return -1;
+ }
+
+ csig->sig = cpuid_eax(0x00000001);
+
+ if ( (c->x86_model >= 5) || (c->x86 > 6) )
+ {
+ /* get processor flags from MSR 0x17 */
+ rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
+ csig->pf = 1 << ((msr_content >> 50) & 7);
+ }
+
+ wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
+ /* As documented in the SDM: Do a CPUID 1 here */
+ cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+ csig->rev = (uint32_t)(msr_content >> 32);
+ pr_debug("microcode: collect_cpu_info : sig=%#x, pf=%#x, rev=%#x\n",
+ csig->sig, csig->pf, csig->rev);
+
+ return 0;
+}
+
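+/*
+ * Validate a single update: header versions, data/total sizes, the
+ * optional extended signature table, and all checksums.
+ */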
+static int microcode_sanity_check(const void *mc)
+{
+ const struct microcode_header_intel *mc_header = mc;
+ const struct extended_sigtable *ext_header = NULL;
+ const struct extended_signature *ext_sig;
+ unsigned long total_size, data_size, ext_table_size;
+ unsigned int ext_sigcount = 0, i;
+ uint32_t sum, orig_sum;
+
+ total_size = get_totalsize(mc_header);
+ data_size = get_datasize(mc_header);
+ if ( (data_size + MC_HEADER_SIZE) > total_size )
+ {
+ printk(KERN_ERR "microcode: error! "
+ "Bad data size in microcode data file\n");
+ return -EINVAL;
+ }
+
+ if ( (mc_header->ldrver != 1) || (mc_header->hdrver != 1) )
+ {
+ printk(KERN_ERR "microcode: error! "
+ "Unknown microcode update format\n");
+ return -EINVAL;
+ }
+ ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
+ if ( ext_table_size )
+ {
+ if ( (ext_table_size < EXT_HEADER_SIZE) ||
+ ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE) )
+ {
+ printk(KERN_ERR "microcode: error! "
+ "Small exttable size in microcode data file\n");
+ return -EINVAL;
+ }
+ ext_header = mc + MC_HEADER_SIZE + data_size;
+ if ( ext_table_size != exttable_size(ext_header) )
+ {
+ printk(KERN_ERR "microcode: error! "
+ "Bad exttable size in microcode data file\n");
+ return -EFAULT;
+ }
+ ext_sigcount = ext_header->count;
+ }
+
+ /* check extended table checksum */
+ if ( ext_table_size )
+ {
+ uint32_t ext_table_sum = 0;
+ uint32_t *ext_tablep = (uint32_t *)ext_header;
+
+ i = ext_table_size / DWSIZE;
+ while ( i-- )
+ ext_table_sum += ext_tablep[i];
+ if ( ext_table_sum )
+ {
+ printk(KERN_WARNING "microcode: aborting, "
+ "bad extended signature table checksum\n");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the checksum */
+ orig_sum = 0;
+ i = (MC_HEADER_SIZE + data_size) / DWSIZE;
+ while ( i-- )
+ orig_sum += ((uint32_t *)mc)[i];
+ if ( orig_sum )
+ {
+ printk(KERN_ERR "microcode: aborting, bad checksum\n");
+ return -EINVAL;
+ }
+ if ( !ext_table_size )
+ return 0;
+ /* check extended signature checksum */
+ for ( i = 0; i < ext_sigcount; i++ )
+ {
+ ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
+ EXT_SIGNATURE_SIZE * i;
+ sum = orig_sum
+ - (mc_header->sig + mc_header->pf + mc_header->cksum)
+ + (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+ if ( sum )
+ {
+ printk(KERN_ERR "microcode: aborting, bad checksum\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/* Check an update against the CPU signature and current update revision */
+static enum microcode_match_result microcode_update_match(
+ const struct microcode_header_intel *mc_header)
+{
+ const struct extended_sigtable *ext_header;
+ const struct extended_signature *ext_sig;
+ unsigned int i;
+ struct cpu_signature *cpu_sig = &this_cpu(cpu_sig);
+ unsigned int sig = cpu_sig->sig;
+ unsigned int pf = cpu_sig->pf;
+ unsigned int rev = cpu_sig->rev;
+ unsigned long data_size = get_datasize(mc_header);
+ const void *end = (const void *)mc_header + get_totalsize(mc_header);
+
+ ASSERT(!microcode_sanity_check(mc_header));
+ if ( sigmatch(sig, mc_header->sig, pf, mc_header->pf) )
+ return (mc_header->rev > rev) ? NEW_UCODE : OLD_UCODE;
+
+ ext_header = (const void *)(mc_header + 1) + data_size;
+ ext_sig = (const void *)(ext_header + 1);
+
+ /*
+ * Make sure there is enough space to hold an extended header and enough
+ * array elements.
+ */
+ if ( end <= (const void *)ext_sig )
+ return MIS_UCODE;
+
+ for ( i = 0; i < ext_header->count; i++ )
+ if ( sigmatch(sig, ext_sig[i].sig, pf, ext_sig[i].pf) )
+ return (mc_header->rev > rev) ? NEW_UCODE : OLD_UCODE;
+
+ return MIS_UCODE;
+}
+
+static bool match_cpu(const struct microcode_patch *patch)
+{
+ if ( !patch )
+ return false;
+
+ return microcode_update_match(&patch->mc_intel->hdr) == NEW_UCODE;
+}
+
+static void free_patch(void *mc)
+{
+ xfree(mc);
+}
+
+static enum microcode_match_result compare_patch(
+ const struct microcode_patch *new, const struct microcode_patch *old)
+{
+ /*
+ * Both patches to compare are supposed to be applicable to local CPU.
+ * Just compare the revision number.
+ */
+ ASSERT(microcode_update_match(&old->mc_intel->hdr) != MIS_UCODE);
+ ASSERT(microcode_update_match(&new->mc_intel->hdr) != MIS_UCODE);
+
+ return (new->mc_intel->hdr.rev > old->mc_intel->hdr.rev) ? NEW_UCODE
+ : OLD_UCODE;
+}
+
+static int apply_microcode(const struct microcode_patch *patch)
+{
+ uint64_t msr_content;
+ unsigned int val[2];
+ unsigned int cpu_num = raw_smp_processor_id();
+ struct cpu_signature *sig = &this_cpu(cpu_sig);
+ const struct microcode_intel *mc_intel;
+
+ if ( !patch )
+ return -ENOENT;
+
+ if ( !match_cpu(patch) )
+ return -EINVAL;
+
+ mc_intel = patch->mc_intel;
+
+ BUG_ON(local_irq_is_enabled());
+
+ /* write microcode via MSR 0x79 */
+ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc_intel->bits);
+ wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
+
+ /* As documented in the SDM: Do a CPUID 1 here */
+ cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+ val[1] = (uint32_t)(msr_content >> 32);
+
+ if ( val[1] != mc_intel->hdr.rev )
+ {
+ printk(KERN_ERR "microcode: CPU%d update from revision "
+ "%#x to %#x failed. Resulting revision is %#x.\n", cpu_num,
+ sig->rev, mc_intel->hdr.rev, val[1]);
+ return -EIO;
+ }
+ printk(KERN_INFO "microcode: CPU%d updated from revision "
+ "%#x to %#x, date = %04x-%02x-%02x\n",
+ cpu_num, sig->rev, val[1], mc_intel->hdr.year,
+ mc_intel->hdr.month, mc_intel->hdr.day);
+ sig->rev = val[1];
+
+ return 0;
+}
+
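+/*
+ * Copy the update at 'offset' into a newly allocated *mc and return the
+ * offset of the next update (0 once the buffer is exhausted, negative on
+ * error).
+ */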
+static long get_next_ucode_from_buffer(struct microcode_intel **mc,
+ const uint8_t *buf, unsigned long size,
+ unsigned long offset)
+{
+ struct microcode_header_intel *mc_header;
+ unsigned long total_size;
+
+ /* No more data */
+ if ( offset >= size )
+ return 0;
+ mc_header = (struct microcode_header_intel *)(buf + offset);
+ total_size = get_totalsize(mc_header);
+
+ if ( (offset + total_size) > size )
+ {
+ printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
+ return -EINVAL;
+ }
+
+ *mc = xmalloc_bytes(total_size);
+ if ( *mc == NULL )
+ {
+ printk(KERN_ERR "microcode: error! Cannot allocate memory\n");
+ return -ENOMEM;
+ }
+ memcpy(*mc, (const void *)(buf + offset), total_size);
+ return offset + total_size;
+}
+
+static struct microcode_patch *cpu_request_microcode(const void *buf,
+ size_t size)
+{
+ long offset = 0;
+ int error = 0;
+ struct microcode_intel *mc, *saved = NULL;
+ struct microcode_patch *patch = NULL;
+
+ while ( (offset = get_next_ucode_from_buffer(&mc, buf, size, offset)) > 0 )
+ {
+ error = microcode_sanity_check(mc);
+ if ( error )
+ {
+ xfree(mc);
+ break;
+ }
+
+ /*
+ * If the new update covers current CPU, compare updates and store the
+ * one with higher revision.
+ */
+ if ( (microcode_update_match(&mc->hdr) != MIS_UCODE) &&
+ (!saved || (mc->hdr.rev > saved->hdr.rev)) )
+ {
+ xfree(saved);
+ saved = mc;
+ }
+ else
+ xfree(mc);
+ }
+ if ( offset < 0 )
+ error = offset;
+
+ if ( saved )
+ {
+ patch = xmalloc(struct microcode_patch);
+ if ( patch )
+ patch->mc_intel = saved;
+ else
+ {
+ xfree(saved);
+ error = -ENOMEM;
+ }
+ }
+
+ if ( error && !patch )
+ patch = ERR_PTR(error);
+
+ return patch;
+}
+
+static const struct microcode_ops microcode_intel_ops = {
+ .cpu_request_microcode = cpu_request_microcode,
+ .collect_cpu_info = collect_cpu_info,
+ .apply_microcode = apply_microcode,
+ .free_patch = free_patch,
+ .compare_patch = compare_patch,
+ .match_cpu = match_cpu,
+};
+
+int __init microcode_init_intel(void)
+{
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ microcode_ops = µcode_intel_ops;
+ return 0;
+}
--- /dev/null
+#ifndef ASM_X86_MICROCODE_PRIVATE_H
+#define ASM_X86_MICROCODE_PRIVATE_H
+
+#include <xen/types.h>
+
+#include <asm/microcode.h>
+
+enum microcode_match_result {
+ OLD_UCODE, /* signature matched, but revision id is older or equal */
+ NEW_UCODE, /* signature matched, but revision id is newer */
+ MIS_UCODE, /* signature mismatched */
+};
+
+struct microcode_patch {
+ union {
+ struct microcode_intel *mc_intel;
+ struct microcode_amd *mc_amd;
+ void *mc;
+ };
+};
+
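+/*
+ * Hooks provided by the vendor-specific drivers:
+ *  - collect_cpu_info() reads the current signature/revision into cpu_sig;
+ *  - cpu_request_microcode() parses a blob and returns the best matching
+ *    patch, NULL if nothing matches, or an ERR_PTR() on failure;
+ *  - apply_microcode() loads a patch on the calling CPU;
+ *  - match_cpu() checks whether a patch covers the CPU and is newer than
+ *    the loaded revision;
+ *  - compare_patch() orders two patches which both cover the local CPU;
+ *  - free_patch() releases the vendor-specific patch representation;
+ *  - start_update()/end_update_percpu() optionally bracket an update
+ *    (used by the AMD driver for OSVW handling).
+ */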
+struct microcode_ops {
+ struct microcode_patch *(*cpu_request_microcode)(const void *buf,
+ size_t size);
+ int (*collect_cpu_info)(struct cpu_signature *csig);
+ int (*apply_microcode)(const struct microcode_patch *patch);
+ int (*start_update)(void);
+ void (*end_update_percpu)(void);
+ void (*free_patch)(void *mc);
+ bool (*match_cpu)(const struct microcode_patch *patch);
+ enum microcode_match_result (*compare_patch)(
+ const struct microcode_patch *new, const struct microcode_patch *old);
+};
+
+extern const struct microcode_ops *microcode_ops;
+
+#endif /* ASM_X86_MICROCODE_PRIVATE_H */
+++ /dev/null
-/*
- * Intel CPU Microcode Update Driver for Linux
- *
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
- * 2006 Shaohua Li <shaohua.li@intel.com> *
- * This driver allows to upgrade microcode on Intel processors
- * belonging to IA-32 family - PentiumPro, Pentium II,
- * Pentium III, Xeon, Pentium 4, etc.
- *
- * Reference: Section 8.11 of Volume 3a, IA-32 Intel? Architecture
- * Software Developer's Manual
- * Order Number 253668 or free download from:
- *
- * http://developer.intel.com/design/pentium4/manuals/253668.htm
- *
- * For more information, go to http://www.urbanmyth.org/microcode
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <xen/cpu.h>
-#include <xen/err.h>
-#include <xen/init.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/notifier.h>
-#include <xen/param.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <xen/softirq.h>
-#include <xen/spinlock.h>
-#include <xen/stop_machine.h>
-#include <xen/tasklet.h>
-#include <xen/guest_access.h>
-#include <xen/earlycpio.h>
-#include <xen/watchdog.h>
-
-#include <asm/apic.h>
-#include <asm/delay.h>
-#include <asm/msr.h>
-#include <asm/nmi.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
-#include <asm/microcode.h>
-
-/*
- * Before performing a late microcode update on any thread, we
- * rendezvous all cpus in stop_machine context. The timeout for
- * waiting for cpu rendezvous is 30ms. It is the timeout used by
- * live patching
- */
-#define MICROCODE_CALLIN_TIMEOUT_US 30000
-
-/*
- * Timeout for each thread to complete update is set to 1s. It is a
- * conservative choice considering all possible interference.
- */
-#define MICROCODE_UPDATE_TIMEOUT_US 1000000
-
-static module_t __initdata ucode_mod;
-static signed int __initdata ucode_mod_idx;
-static bool_t __initdata ucode_mod_forced;
-static unsigned int nr_cores;
-
-/*
- * These states help to coordinate CPUs during loading an update.
- *
- * The semantics of each state is as follow:
- * - LOADING_PREPARE: initial state of 'loading_state'.
- * - LOADING_CALLIN: CPUs are allowed to callin.
- * - LOADING_ENTER: all CPUs have called in. Initiate ucode loading.
- * - LOADING_EXIT: ucode loading is done or aborted.
- */
-static enum {
- LOADING_PREPARE,
- LOADING_CALLIN,
- LOADING_ENTER,
- LOADING_EXIT,
-} loading_state;
-
-/*
- * If we scan the initramfs.cpio for the early microcode code
- * and find it, then 'ucode_blob' will contain the pointer
- * and the size of said blob. It is allocated from Xen's heap
- * memory.
- */
-struct ucode_mod_blob {
- const void *data;
- size_t size;
-};
-
-static struct ucode_mod_blob __initdata ucode_blob;
-/*
- * By default we will NOT parse the multiboot modules to see if there is
- * cpio image with the microcode images.
- */
-static bool_t __initdata ucode_scan;
-
-/* By default, ucode loading is done in NMI handler */
-static bool ucode_in_nmi = true;
-
-/* Protected by microcode_mutex */
-static struct microcode_patch *microcode_cache;
-
-void __init microcode_set_module(unsigned int idx)
-{
- ucode_mod_idx = idx;
- ucode_mod_forced = 1;
-}
-
-/*
- * The format is '[<integer>|scan=<bool>, nmi=<bool>]'. Both options are
- * optional. If the EFI has forced which of the multiboot payloads is to be
- * used, only nmi=<bool> is parsed.
- */
-static int __init parse_ucode(const char *s)
-{
- const char *ss;
- int val, rc = 0;
-
- do {
- ss = strchr(s, ',');
- if ( !ss )
- ss = strchr(s, '\0');
-
- if ( (val = parse_boolean("nmi", s, ss)) >= 0 )
- ucode_in_nmi = val;
- else if ( !ucode_mod_forced ) /* Not forced by EFI */
- {
- if ( (val = parse_boolean("scan", s, ss)) >= 0 )
- ucode_scan = val;
- else
- {
- const char *q;
-
- ucode_mod_idx = simple_strtol(s, &q, 0);
- if ( q != ss )
- rc = -EINVAL;
- }
- }
-
- s = ss + 1;
- } while ( *ss );
-
- return rc;
-}
-custom_param("ucode", parse_ucode);
-
-void __init microcode_scan_module(
- unsigned long *module_map,
- const multiboot_info_t *mbi)
-{
- module_t *mod = (module_t *)__va(mbi->mods_addr);
- uint64_t *_blob_start;
- unsigned long _blob_size;
- struct cpio_data cd;
- long offset;
- const char *p = NULL;
- int i;
-
- ucode_blob.size = 0;
- if ( !ucode_scan )
- return;
-
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- p = "kernel/x86/microcode/AuthenticAMD.bin";
- else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- p = "kernel/x86/microcode/GenuineIntel.bin";
- else
- return;
-
- /*
- * Try all modules and see whichever could be the microcode blob.
- */
- for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ )
- {
- if ( !test_bit(i, module_map) )
- continue;
-
- _blob_start = bootstrap_map(&mod[i]);
- _blob_size = mod[i].mod_end;
- if ( !_blob_start )
- {
- printk("Could not map multiboot module #%d (size: %ld)\n",
- i, _blob_size);
- continue;
- }
- cd.data = NULL;
- cd.size = 0;
- cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */);
- if ( cd.data )
- {
- ucode_blob.size = cd.size;
- ucode_blob.data = cd.data;
- break;
- }
- bootstrap_map(NULL);
- }
-}
-void __init microcode_grab_module(
- unsigned long *module_map,
- const multiboot_info_t *mbi)
-{
- module_t *mod = (module_t *)__va(mbi->mods_addr);
-
- if ( ucode_mod_idx < 0 )
- ucode_mod_idx += mbi->mods_count;
- if ( ucode_mod_idx <= 0 || ucode_mod_idx >= mbi->mods_count ||
- !__test_and_clear_bit(ucode_mod_idx, module_map) )
- goto scan;
- ucode_mod = mod[ucode_mod_idx];
-scan:
- if ( ucode_scan )
- microcode_scan_module(module_map, mbi);
-}
-
-const struct microcode_ops *microcode_ops;
-
-static DEFINE_SPINLOCK(microcode_mutex);
-
-DEFINE_PER_CPU(struct cpu_signature, cpu_sig);
-/* Store error code of the work done in NMI handler */
-static DEFINE_PER_CPU(int, loading_err);
-
-/*
- * Count the CPUs that have entered, exited the rendezvous and succeeded in
- * microcode update during late microcode update respectively.
- *
- * Note that a bitmap is used for callin to allow cpu to set a bit multiple
- * times. It is required to do busy-loop in #NMI handling.
- */
-static cpumask_t cpu_callin_map;
-static atomic_t cpu_out, cpu_updated;
-static const struct microcode_patch *nmi_patch = ZERO_BLOCK_PTR;
-
-/*
- * Return a patch that covers current CPU. If there are multiple patches,
- * return the one with the highest revision number. Return error If no
- * patch is found and an error occurs during the parsing process. Otherwise
- * return NULL.
- */
-static struct microcode_patch *parse_blob(const char *buf, size_t len)
-{
- if ( likely(!microcode_ops->collect_cpu_info(&this_cpu(cpu_sig))) )
- return microcode_ops->cpu_request_microcode(buf, len);
-
- return NULL;
-}
-
-static void microcode_free_patch(struct microcode_patch *microcode_patch)
-{
- microcode_ops->free_patch(microcode_patch->mc);
- xfree(microcode_patch);
-}
-
-/* Return true if cache gets updated. Otherwise, return false */
-static bool microcode_update_cache(struct microcode_patch *patch)
-{
- ASSERT(spin_is_locked(µcode_mutex));
-
- if ( !microcode_cache )
- microcode_cache = patch;
- else if ( microcode_ops->compare_patch(patch,
- microcode_cache) == NEW_UCODE )
- {
- microcode_free_patch(microcode_cache);
- microcode_cache = patch;
- }
- else
- {
- microcode_free_patch(patch);
- return false;
- }
-
- return true;
-}
-
-/* Wait for a condition to be met with a timeout (us). */
-static int wait_for_condition(bool (*func)(unsigned int data),
- unsigned int data, unsigned int timeout)
-{
- while ( !func(data) )
- {
- if ( !timeout-- )
- {
- printk("CPU%u: Timeout in %pS\n",
- smp_processor_id(), __builtin_return_address(0));
- return -EBUSY;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static bool wait_cpu_callin(unsigned int nr)
-{
- return cpumask_weight(&cpu_callin_map) >= nr;
-}
-
-static bool wait_cpu_callout(unsigned int nr)
-{
- return atomic_read(&cpu_out) >= nr;
-}
-
-/*
- * Load a microcode update to current CPU.
- *
- * If no patch is provided, the cached patch will be loaded. Microcode update
- * during APs bringup and CPU resuming falls into this case.
- */
-static int microcode_update_cpu(const struct microcode_patch *patch)
-{
- int err = microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
-
- if ( unlikely(err) )
- return err;
-
- spin_lock(µcode_mutex);
- if ( patch )
- err = microcode_ops->apply_microcode(patch);
- else if ( microcode_cache )
- {
- err = microcode_ops->apply_microcode(microcode_cache);
- if ( err == -EIO )
- {
- microcode_free_patch(microcode_cache);
- microcode_cache = NULL;
- }
- }
- else
- /* No patch to update */
- err = -ENOENT;
- spin_unlock(µcode_mutex);
-
- return err;
-}
-
-static bool wait_for_state(typeof(loading_state) state)
-{
- typeof(loading_state) cur_state;
-
- while ( (cur_state = ACCESS_ONCE(loading_state)) != state )
- {
- if ( cur_state == LOADING_EXIT )
- return false;
- cpu_relax();
- }
-
- return true;
-}
-
-static void set_state(typeof(loading_state) state)
-{
- ACCESS_ONCE(loading_state) = state;
-}
-
-static int secondary_nmi_work(void)
-{
- cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
-
- return wait_for_state(LOADING_EXIT) ? 0 : -EBUSY;
-}
-
-static int primary_thread_work(const struct microcode_patch *patch)
-{
- int ret;
-
- cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
-
- if ( !wait_for_state(LOADING_ENTER) )
- return -EBUSY;
-
- ret = microcode_ops->apply_microcode(patch);
- if ( !ret )
- atomic_inc(&cpu_updated);
- atomic_inc(&cpu_out);
-
- return ret;
-}
-
-static int microcode_nmi_callback(const struct cpu_user_regs *regs, int cpu)
-{
- unsigned int primary = cpumask_first(this_cpu(cpu_sibling_mask));
- int ret;
-
- /* System-generated NMI, leave to main handler */
- if ( ACCESS_ONCE(loading_state) != LOADING_CALLIN )
- return 0;
-
- /*
- * Primary threads load ucode in the NMI handler only if ucode_in_nmi is true.
- * Secondary threads are expected to stay in NMI handler regardless of
- * ucode_in_nmi.
- */
- if ( cpu == cpumask_first(&cpu_online_map) ||
- (!ucode_in_nmi && cpu == primary) )
- return 0;
-
- if ( cpu == primary )
- ret = primary_thread_work(nmi_patch);
- else
- ret = secondary_nmi_work();
- this_cpu(loading_err) = ret;
-
- return 0;
-}
-
-static int secondary_thread_fn(void)
-{
- if ( !wait_for_state(LOADING_CALLIN) )
- return -EBUSY;
-
- self_nmi();
-
- /*
- * Wait until ucode loading is done, in case the NMI does not arrive
- * synchronously; otherwise a not-yet-updated CPU signature might be
- * copied below.
- */
- if ( unlikely(!wait_for_state(LOADING_EXIT)) )
- ASSERT_UNREACHABLE();
-
- /* Copy update revision from the primary thread. */
- this_cpu(cpu_sig).rev =
- per_cpu(cpu_sig, cpumask_first(this_cpu(cpu_sibling_mask))).rev;
-
- return this_cpu(loading_err);
-}
-
-static int primary_thread_fn(const struct microcode_patch *patch)
-{
- if ( !wait_for_state(LOADING_CALLIN) )
- return -EBUSY;
-
- if ( ucode_in_nmi )
- {
- self_nmi();
-
- /*
- * Wait until ucode loading is done, in case the NMI does not arrive
- * synchronously; otherwise a not-yet-updated error code might be
- * returned below.
- */
- if ( unlikely(!wait_for_state(LOADING_EXIT)) )
- ASSERT_UNREACHABLE();
-
- return this_cpu(loading_err);
- }
-
- return primary_thread_work(patch);
-}
-
-static int control_thread_fn(const struct microcode_patch *patch)
-{
- unsigned int cpu = smp_processor_id(), done;
- unsigned long tick;
- int ret;
- nmi_callback_t *saved_nmi_callback;
-
- /*
- * We intend to keep interrupts disabled for a long time, which may
- * otherwise lead to a watchdog timeout.
- */
- watchdog_disable();
-
- nmi_patch = patch;
- smp_wmb();
- saved_nmi_callback = set_nmi_callback(microcode_nmi_callback);
-
- /* Allow threads to call in */
- set_state(LOADING_CALLIN);
-
- cpumask_set_cpu(cpu, &cpu_callin_map);
-
- /* Waiting for all threads calling in */
- ret = wait_for_condition(wait_cpu_callin, num_online_cpus(),
- MICROCODE_CALLIN_TIMEOUT_US);
- if ( ret )
- {
- set_state(LOADING_EXIT);
- return ret;
- }
-
- /* Control thread loads ucode first while others are in NMI handler. */
- ret = microcode_ops->apply_microcode(patch);
- if ( !ret )
- atomic_inc(&cpu_updated);
- atomic_inc(&cpu_out);
-
- if ( ret == -EIO )
- {
- printk(XENLOG_ERR
- "Late loading aborted: CPU%u failed to update ucode\n", cpu);
- set_state(LOADING_EXIT);
- return ret;
- }
-
- /* Let primary threads load the given ucode update */
- set_state(LOADING_ENTER);
-
- tick = rdtsc_ordered();
- /* Wait for primary threads finishing update */
- while ( (done = atomic_read(&cpu_out)) != nr_cores )
- {
- /*
- * During each timeout interval, at least one CPU is expected to
- * finish its update; otherwise, something has gone wrong.
- *
- * Note that RDTSC (in wait_for_condition()) is safe for threads to
- * execute while waiting for completion of loading an update.
- */
- if ( wait_for_condition(wait_cpu_callout, (done + 1),
- MICROCODE_UPDATE_TIMEOUT_US) )
- panic("Timeout when finished updating microcode (finished %u/%u)",
- done, nr_cores);
-
- /* Print warning message once if long time is spent here */
- if ( tick && rdtsc_ordered() - tick >= cpu_khz * 1000 )
- {
- printk(XENLOG_WARNING
- "WARNING: UPDATING MICROCODE HAS CONSUMED MORE THAN 1 SECOND!\n");
- tick = 0;
- }
- }
-
- /* Mark loading is done to unblock other threads */
- set_state(LOADING_EXIT);
-
- set_nmi_callback(saved_nmi_callback);
- smp_wmb();
- nmi_patch = ZERO_BLOCK_PTR;
-
- watchdog_enable();
-
- return ret;
-}
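
The rendezvous above is driven purely by writes to 'loading_state' on the control thread, with the other threads spinning in wait_for_state() until they observe the value they need (or LOADING_EXIT). As a minimal illustrative sketch, not the actual declaration (which lives earlier in this file), the states used by the code can be modelled as:

enum loading_state_sketch {
    LOADING_PREPARE, /* set by the caller before stop_machine_run() starts  */
    LOADING_CALLIN,  /* control thread: APs may call in (usually via NMI)   */
    LOADING_ENTER,   /* control thread: primary threads may apply the patch */
    LOADING_EXIT,    /* update finished or aborted; everyone leaves         */
};

control_thread_fn() only ever moves forward through these states and sets LOADING_EXIT on every exit path, which is what keeps waiters in wait_for_state() from getting stuck.
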
-
-static int do_microcode_update(void *patch)
-{
- unsigned int cpu = smp_processor_id();
- int ret;
-
- /*
- * The control thread sets the state to coordinate ucode loading. Primary
- * threads load the given ucode patch. Secondary threads just wait for
- * the completion of the ucode loading process.
- */
- if ( cpu == cpumask_first(&cpu_online_map) )
- ret = control_thread_fn(patch);
- else if ( cpu == cpumask_first(this_cpu(cpu_sibling_mask)) )
- ret = primary_thread_fn(patch);
- else
- ret = secondary_thread_fn();
-
- if ( microcode_ops->end_update_percpu )
- microcode_ops->end_update_percpu();
-
- return ret;
-}
-
-struct ucode_buf {
- unsigned int len;
- char buffer[];
-};
-
-static long microcode_update_helper(void *data)
-{
- int ret;
- struct ucode_buf *buffer = data;
- unsigned int cpu, updated;
- struct microcode_patch *patch;
-
- /* cpu_online_map must not change during update */
- if ( !get_cpu_maps() )
- {
- xfree(buffer);
- return -EBUSY;
- }
-
- /*
- * All CPUs except the first online CPU send a fake (self) NMI to
- * rendezvous in the NMI handler. But a fake NMI sent to nmi_cpu may
- * trigger unknown_nmi_error(). This check ensures that nmi_cpu will
- * not receive a fake NMI.
- */
- if ( unlikely(cpumask_first(&cpu_online_map) != nmi_cpu) )
- {
- xfree(buffer);
- printk(XENLOG_WARNING
- "CPU%u is expected to lead ucode loading (but got CPU%u)\n",
- nmi_cpu, cpumask_first(&cpu_online_map));
- return -EPERM;
- }
-
- patch = parse_blob(buffer->buffer, buffer->len);
- xfree(buffer);
- if ( IS_ERR(patch) )
- {
- ret = PTR_ERR(patch);
- printk(XENLOG_WARNING "Parsing microcode blob error %d\n", ret);
- goto put;
- }
-
- if ( !patch )
- {
- printk(XENLOG_WARNING "microcode: couldn't find any matching ucode in "
- "the provided blob!\n");
- ret = -ENOENT;
- goto put;
- }
-
- /*
- * If microcode_cache exists, all CPUs in the system should have at least
- * that ucode revision.
- */
- spin_lock(µcode_mutex);
- if ( microcode_cache &&
- microcode_ops->compare_patch(patch, microcode_cache) != NEW_UCODE )
- {
- spin_unlock(µcode_mutex);
- printk(XENLOG_WARNING "microcode: couldn't find any newer revision "
- "in the provided blob!\n");
- microcode_free_patch(patch);
- ret = -ENOENT;
-
- goto put;
- }
- spin_unlock(µcode_mutex);
-
- if ( microcode_ops->start_update )
- {
- ret = microcode_ops->start_update();
- if ( ret )
- {
- microcode_free_patch(patch);
- goto put;
- }
- }
-
- cpumask_clear(&cpu_callin_map);
- atomic_set(&cpu_out, 0);
- atomic_set(&cpu_updated, 0);
- loading_state = LOADING_PREPARE;
-
- /* Calculate the number of online CPU cores */
- nr_cores = 0;
- for_each_online_cpu(cpu)
- if ( cpu == cpumask_first(per_cpu(cpu_sibling_mask, cpu)) )
- nr_cores++;
-
- printk(XENLOG_INFO "%u cores are to update their microcode\n", nr_cores);
-
- /*
- * Late loading dance. Why the heavy-handed stop_machine effort?
- *
- * - HT siblings must be idle and not execute other code while the other
- * sibling is loading microcode in order to avoid any negative
- * interactions caused by the loading.
- *
- * - In addition, microcode update on the cores must be serialized until
- * this requirement can be relaxed in the future. Right now, this is
- * conservative and good.
- */
- ret = stop_machine_run(do_microcode_update, patch, NR_CPUS);
-
- updated = atomic_read(&cpu_updated);
- if ( updated > 0 )
- {
- spin_lock(µcode_mutex);
- microcode_update_cache(patch);
- spin_unlock(µcode_mutex);
- }
- else
- microcode_free_patch(patch);
-
- if ( updated && updated != nr_cores )
- printk(XENLOG_ERR "ERROR: Updating microcode succeeded on %u cores and failed\n"
- XENLOG_ERR "on other %u cores. A system with differing microcode\n"
- XENLOG_ERR "revisions is considered unstable. Please reboot and do not\n"
- XENLOG_ERR "load the microcode that triggers this warning!\n",
- updated, nr_cores - updated);
-
- put:
- put_cpu_maps();
- return ret;
-}
-
-int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
-{
- int ret;
- struct ucode_buf *buffer;
-
- if ( len != (uint32_t)len )
- return -E2BIG;
-
- if ( microcode_ops == NULL )
- return -EINVAL;
-
- buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
- if ( !buffer )
- return -ENOMEM;
-
- ret = copy_from_guest(buffer->buffer, buf, len);
- if ( ret )
- {
- xfree(buffer);
- return -EFAULT;
- }
- buffer->len = len;
-
- return continue_hypercall_on_cpu(smp_processor_id(),
- microcode_update_helper, buffer);
-}
-
-static int __init microcode_init(void)
-{
- /*
- * At this point, all CPUs should have updated their microcode
- * via the early_microcode_* paths so free the microcode blob.
- */
- if ( ucode_blob.size )
- {
- bootstrap_map(NULL);
- ucode_blob.size = 0;
- ucode_blob.data = NULL;
- }
- else if ( ucode_mod.mod_end )
- {
- bootstrap_map(NULL);
- ucode_mod.mod_end = 0;
- }
-
- return 0;
-}
-__initcall(microcode_init);
-
-/* Load a cached update to current cpu */
-int microcode_update_one(bool start_update)
-{
- int err;
-
- if ( !microcode_ops )
- return -EOPNOTSUPP;
-
- microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
-
- if ( start_update && microcode_ops->start_update )
- {
- err = microcode_ops->start_update();
- if ( err )
- return err;
- }
-
- err = microcode_update_cpu(NULL);
-
- if ( microcode_ops->end_update_percpu )
- microcode_ops->end_update_percpu();
-
- return err;
-}
-
-/* BSP calls this function to parse ucode blob and then apply an update. */
-static int __init early_microcode_update_cpu(void)
-{
- int rc = 0;
- const void *data = NULL;
- size_t len;
- struct microcode_patch *patch;
-
- if ( ucode_blob.size )
- {
- len = ucode_blob.size;
- data = ucode_blob.data;
- }
- else if ( ucode_mod.mod_end )
- {
- len = ucode_mod.mod_end;
- data = bootstrap_map(&ucode_mod);
- }
-
- if ( !data )
- return -ENOMEM;
-
- patch = parse_blob(data, len);
- if ( IS_ERR(patch) )
- {
- printk(XENLOG_WARNING "Parsing microcode blob error %ld\n",
- PTR_ERR(patch));
- return PTR_ERR(patch);
- }
-
- if ( !patch )
- return -ENOENT;
-
- spin_lock(µcode_mutex);
- rc = microcode_update_cache(patch);
- spin_unlock(µcode_mutex);
- ASSERT(rc);
-
- return microcode_update_one(true);
-}
-
-int __init early_microcode_init(void)
-{
- int rc;
-
- rc = microcode_init_intel();
- if ( rc )
- return rc;
-
- rc = microcode_init_amd();
- if ( rc )
- return rc;
-
- if ( microcode_ops )
- {
- microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
-
- if ( ucode_mod.mod_end || ucode_blob.size )
- rc = early_microcode_update_cpu();
- }
-
- return rc;
-}
+++ /dev/null
-/*
- * AMD CPU Microcode Update Driver for Linux
- * Copyright (C) 2008 Advanced Micro Devices Inc.
- *
- * Author: Peter Oruba <peter.oruba@amd.com>
- *
- * Based on work by:
- * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
- *
- * This driver allows to upgrade microcode on AMD
- * family 0x10 and later.
- *
- * Licensed unter the terms of the GNU General Public
- * License version 2. See file COPYING for details.
- */
-
-#include <xen/err.h>
-#include <xen/init.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <xen/spinlock.h>
-
-#include <asm/msr.h>
-#include <asm/processor.h>
-#include <asm/microcode.h>
-#include <asm/hvm/svm/svm.h>
-
-#define pr_debug(x...) ((void)0)
-
-#define CONT_HDR_SIZE 12
-#define SECTION_HDR_SIZE 8
-#define PATCH_HDR_SIZE 32
-
-struct __packed equiv_cpu_entry {
- uint32_t installed_cpu;
- uint32_t fixed_errata_mask;
- uint32_t fixed_errata_compare;
- uint16_t equiv_cpu;
- uint16_t reserved;
-};
-
-struct __packed microcode_header_amd {
- uint32_t data_code;
- uint32_t patch_id;
- uint8_t mc_patch_data_id[2];
- uint8_t mc_patch_data_len;
- uint8_t init_flag;
- uint32_t mc_patch_data_checksum;
- uint32_t nb_dev_id;
- uint32_t sb_dev_id;
- uint16_t processor_rev_id;
- uint8_t nb_rev_id;
- uint8_t sb_rev_id;
- uint8_t bios_api_rev;
- uint8_t reserved1[3];
- uint32_t match_reg[8];
-};
-
-#define UCODE_MAGIC 0x00414d44
-#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
-#define UCODE_UCODE_TYPE 0x00000001
-
-struct microcode_amd {
- void *mpb;
- size_t mpb_size;
- struct equiv_cpu_entry *equiv_cpu_table;
- size_t equiv_cpu_table_size;
-};
-
-struct mpbhdr {
- uint32_t type;
- uint32_t len;
- uint8_t data[];
-};
-
-/* See comment in start_update() for cases when this routine fails */
-static int collect_cpu_info(struct cpu_signature *csig)
-{
- unsigned int cpu = smp_processor_id();
- struct cpuinfo_x86 *c = &cpu_data[cpu];
-
- memset(csig, 0, sizeof(*csig));
-
- if ( (c->x86_vendor != X86_VENDOR_AMD) || (c->x86 < 0x10) )
- {
- printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
- cpu);
- return -EINVAL;
- }
-
- rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
-
- pr_debug("microcode: CPU%d collect_cpu_info: patch_id=%#x\n",
- cpu, csig->rev);
-
- return 0;
-}
-
-static bool_t verify_patch_size(uint32_t patch_size)
-{
- uint32_t max_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-#define F16H_MPB_MAX_SIZE 3458
-#define F17H_MPB_MAX_SIZE 3200
-
- switch (boot_cpu_data.x86)
- {
- case 0x14:
- max_size = F14H_MPB_MAX_SIZE;
- break;
- case 0x15:
- max_size = F15H_MPB_MAX_SIZE;
- break;
- case 0x16:
- max_size = F16H_MPB_MAX_SIZE;
- break;
- case 0x17:
- max_size = F17H_MPB_MAX_SIZE;
- break;
- default:
- max_size = F1XH_MPB_MAX_SIZE;
- break;
- }
-
- return (patch_size <= max_size);
-}
-
-static bool_t find_equiv_cpu_id(const struct equiv_cpu_entry *equiv_cpu_table,
- unsigned int current_cpu_id,
- unsigned int *equiv_cpu_id)
-{
- unsigned int i;
-
- if ( !equiv_cpu_table )
- return 0;
-
- for ( i = 0; equiv_cpu_table[i].installed_cpu != 0; i++ )
- {
- if ( current_cpu_id == equiv_cpu_table[i].installed_cpu )
- {
- *equiv_cpu_id = equiv_cpu_table[i].equiv_cpu & 0xffff;
- return 1;
- }
- }
-
- return 0;
-}
-
-static enum microcode_match_result microcode_fits(
- const struct microcode_amd *mc_amd)
-{
- unsigned int cpu = smp_processor_id();
- const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
- const struct microcode_header_amd *mc_header = mc_amd->mpb;
- const struct equiv_cpu_entry *equiv_cpu_table = mc_amd->equiv_cpu_table;
- unsigned int current_cpu_id;
- unsigned int equiv_cpu_id;
-
- current_cpu_id = cpuid_eax(0x00000001);
-
- if ( !find_equiv_cpu_id(equiv_cpu_table, current_cpu_id, &equiv_cpu_id) )
- return MIS_UCODE;
-
- if ( (mc_header->processor_rev_id) != equiv_cpu_id )
- return MIS_UCODE;
-
- if ( !verify_patch_size(mc_amd->mpb_size) )
- {
- pr_debug("microcode: patch size mismatch\n");
- return MIS_UCODE;
- }
-
- if ( mc_header->patch_id <= sig->rev )
- {
- pr_debug("microcode: patch is already at required level or greater.\n");
- return OLD_UCODE;
- }
-
- pr_debug("microcode: CPU%d found a matching microcode update with version %#x (current=%#x)\n",
- cpu, mc_header->patch_id, sig->rev);
-
- return NEW_UCODE;
-}
-
-static bool match_cpu(const struct microcode_patch *patch)
-{
- return patch && (microcode_fits(patch->mc_amd) == NEW_UCODE);
-}
-
-static void free_patch(void *mc)
-{
- struct microcode_amd *mc_amd = mc;
-
- if ( mc_amd )
- {
- xfree(mc_amd->equiv_cpu_table);
- xfree(mc_amd->mpb);
- xfree(mc_amd);
- }
-}
-
-static enum microcode_match_result compare_header(
- const struct microcode_header_amd *new_header,
- const struct microcode_header_amd *old_header)
-{
- if ( new_header->processor_rev_id == old_header->processor_rev_id )
- return (new_header->patch_id > old_header->patch_id) ? NEW_UCODE
- : OLD_UCODE;
-
- return MIS_UCODE;
-}
-
-static enum microcode_match_result compare_patch(
- const struct microcode_patch *new, const struct microcode_patch *old)
-{
- const struct microcode_header_amd *new_header = new->mc_amd->mpb;
- const struct microcode_header_amd *old_header = old->mc_amd->mpb;
-
- /* Both patches to compare are supposed to be applicable to local CPU. */
- ASSERT(microcode_fits(new->mc_amd) != MIS_UCODE);
- ASSERT(microcode_fits(old->mc_amd) != MIS_UCODE);
-
- return compare_header(new_header, old_header);
-}
-
-static int apply_microcode(const struct microcode_patch *patch)
-{
- uint32_t rev;
- int hw_err;
- unsigned int cpu = smp_processor_id();
- struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
- const struct microcode_header_amd *hdr;
-
- if ( !patch )
- return -ENOENT;
-
- if ( !match_cpu(patch) )
- return -EINVAL;
-
- hdr = patch->mc_amd->mpb;
-
- BUG_ON(local_irq_is_enabled());
-
- hw_err = wrmsr_safe(MSR_AMD_PATCHLOADER, (unsigned long)hdr);
-
- /* get patch id after patching */
- rdmsrl(MSR_AMD_PATCHLEVEL, rev);
-
- /*
- * Some processors leave the ucode blob mapping as UC after the update.
- * Flush the mapping to regain normal cacheability.
- */
- flush_area_local(hdr, FLUSH_TLB_GLOBAL | FLUSH_ORDER(0));
-
- /* check current patch id and patch's id for match */
- if ( hw_err || (rev != hdr->patch_id) )
- {
- printk(KERN_ERR "microcode: CPU%d update from revision "
- "%#x to %#x failed\n", cpu, rev, hdr->patch_id);
- return -EIO;
- }
-
- printk(KERN_WARNING "microcode: CPU%d updated from revision %#x to %#x\n",
- cpu, sig->rev, hdr->patch_id);
-
- sig->rev = rev;
-
- return 0;
-}
-
-static int get_ucode_from_buffer_amd(
- struct microcode_amd *mc_amd,
- const void *buf,
- size_t bufsize,
- size_t *offset)
-{
- const struct mpbhdr *mpbuf = buf + *offset;
-
- /* No more data */
- if ( *offset >= bufsize )
- {
- printk(KERN_ERR "microcode: Microcode buffer overrun\n");
- return -EINVAL;
- }
-
- if ( mpbuf->type != UCODE_UCODE_TYPE )
- {
- printk(KERN_ERR "microcode: Wrong microcode payload type field\n");
- return -EINVAL;
- }
-
- if ( (*offset + mpbuf->len) > bufsize )
- {
- printk(KERN_ERR "microcode: Bad data in microcode data file\n");
- return -EINVAL;
- }
-
- mc_amd->mpb = xmalloc_bytes(mpbuf->len);
- if ( !mc_amd->mpb )
- return -ENOMEM;
- mc_amd->mpb_size = mpbuf->len;
- memcpy(mc_amd->mpb, mpbuf->data, mpbuf->len);
-
- pr_debug("microcode: CPU%d size %zu, block size %u offset %zu equivID %#x rev %#x\n",
- raw_smp_processor_id(), bufsize, mpbuf->len, *offset,
- ((struct microcode_header_amd *)mc_amd->mpb)->processor_rev_id,
- ((struct microcode_header_amd *)mc_amd->mpb)->patch_id);
-
- *offset += mpbuf->len + SECTION_HDR_SIZE;
-
- return 0;
-}
-
-static int install_equiv_cpu_table(
- struct microcode_amd *mc_amd,
- const void *data,
- size_t *offset)
-{
- const struct mpbhdr *mpbuf = data + *offset + 4;
-
- *offset += mpbuf->len + CONT_HDR_SIZE; /* add header length */
-
- if ( mpbuf->type != UCODE_EQUIV_CPU_TABLE_TYPE )
- {
- printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table type field\n");
- return -EINVAL;
- }
-
- if ( mpbuf->len == 0 )
- {
- printk(KERN_ERR "microcode: Wrong microcode equivalent cpu table length\n");
- return -EINVAL;
- }
-
- mc_amd->equiv_cpu_table = xmalloc_bytes(mpbuf->len);
- if ( !mc_amd->equiv_cpu_table )
- {
- printk(KERN_ERR "microcode: Cannot allocate memory for equivalent cpu table\n");
- return -ENOMEM;
- }
-
- memcpy(mc_amd->equiv_cpu_table, mpbuf->data, mpbuf->len);
- mc_amd->equiv_cpu_table_size = mpbuf->len;
-
- return 0;
-}
-
-static int container_fast_forward(const void *data, size_t size_left, size_t *offset)
-{
- for ( ; ; )
- {
- size_t size;
- const uint32_t *header;
-
- if ( size_left < SECTION_HDR_SIZE )
- return -EINVAL;
-
- header = data + *offset;
-
- if ( header[0] == UCODE_MAGIC &&
- header[1] == UCODE_EQUIV_CPU_TABLE_TYPE )
- break;
-
- if ( header[0] != UCODE_UCODE_TYPE )
- return -EINVAL;
- size = header[1] + SECTION_HDR_SIZE;
- if ( size < PATCH_HDR_SIZE || size_left < size )
- return -EINVAL;
-
- size_left -= size;
- *offset += size;
-
- if ( !size_left )
- return -ENODATA;
- }
-
- return 0;
-}
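
Taken together, install_equiv_cpu_table(), get_ucode_from_buffer_amd() and container_fast_forward() imply the container layout sketched below. This is reconstructed from the constants used above (CONT_HDR_SIZE = 12, SECTION_HDR_SIZE = 8) rather than quoted from AMD documentation, so treat it as illustrative:

/*
 * Byte offsets within one container, as implied by the parsers above:
 *
 *   0x00  uint32_t magic == UCODE_MAGIC
 *   0x04  uint32_t type  == UCODE_EQUIV_CPU_TABLE_TYPE
 *   0x08  uint32_t len      (size of the equivalence table payload)
 *   0x0c  struct equiv_cpu_entry table[]  (len bytes; find_equiv_cpu_id()
 *                                          stops at installed_cpu == 0)
 *
 * ... followed by one or more patch sections, each being:
 *
 *   +0x00 uint32_t type  == UCODE_UCODE_TYPE
 *   +0x04 uint32_t len      (size of the patch payload)
 *   +0x08 struct microcode_header_amd plus patch data  (len bytes)
 *
 * ... and possibly another container, which again starts with UCODE_MAGIC.
 */
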
-
-/*
- * The 'final_levels' of patch ids have been obtained empirically.
- * Refer to bug https://bugzilla.suse.com/show_bug.cgi?id=913996
- * for details of the issue. The short version is that people
- * using certain Fam10h systems noticed system hang issues when
- * trying to update microcode levels beyond the patch IDs below.
- * From internal discussions, we gathered that OS/hypervisor
- * cannot reliably perform microcode updates beyond these levels
- * due to hardware issues. Therefore, we need to abort the microcode
- * update process if we hit any of these levels.
- */
-static const unsigned int final_levels[] = {
- 0x01000098,
- 0x0100009f,
- 0x010000af
-};
-
-static bool_t check_final_patch_levels(unsigned int cpu)
-{
- /*
- * Check the current patch levels on the cpu. If they are equal to
- * any of the 'final_levels', then we should not update the microcode
- * patch on the cpu as the system will hang otherwise.
- */
- const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
- unsigned int i;
-
- if ( boot_cpu_data.x86 != 0x10 )
- return 0;
-
- for ( i = 0; i < ARRAY_SIZE(final_levels); i++ )
- if ( sig->rev == final_levels[i] )
- return 1;
-
- return 0;
-}
-
-static struct microcode_patch *cpu_request_microcode(const void *buf,
- size_t bufsize)
-{
- struct microcode_amd *mc_amd;
- struct microcode_header_amd *saved = NULL;
- struct microcode_patch *patch = NULL;
- size_t offset = 0, saved_size = 0;
- int error = 0;
- unsigned int current_cpu_id;
- unsigned int equiv_cpu_id;
- unsigned int cpu = smp_processor_id();
- const struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
-
- current_cpu_id = cpuid_eax(0x00000001);
-
- if ( *(const uint32_t *)buf != UCODE_MAGIC )
- {
- printk(KERN_ERR "microcode: Wrong microcode patch file magic\n");
- error = -EINVAL;
- goto out;
- }
-
- if ( check_final_patch_levels(cpu) )
- {
- printk(XENLOG_INFO
- "microcode: Cannot update microcode patch on the cpu as we hit a final level\n");
- error = -EPERM;
- goto out;
- }
-
- mc_amd = xzalloc(struct microcode_amd);
- if ( !mc_amd )
- {
- printk(KERN_ERR "microcode: Cannot allocate memory for microcode patch\n");
- error = -ENOMEM;
- goto out;
- }
-
- /*
- * Multiple container file support:
- * 1. Check whether this container file has an equiv_cpu_id match.
- * 2. If not, fast-forward to the next container file.
- */
- while ( offset < bufsize )
- {
- error = install_equiv_cpu_table(mc_amd, buf, &offset);
- if ( error )
- {
- printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
- break;
- }
-
- /*
- * This could happen because we advance 'offset' early in
- * install_equiv_cpu_table().
- */
- if ( offset > bufsize )
- {
- printk(KERN_ERR "microcode: Microcode buffer overrun\n");
- error = -EINVAL;
- break;
- }
-
- if ( find_equiv_cpu_id(mc_amd->equiv_cpu_table, current_cpu_id,
- &equiv_cpu_id) )
- break;
-
- error = container_fast_forward(buf, bufsize - offset, &offset);
- if ( error == -ENODATA )
- {
- ASSERT(offset == bufsize);
- break;
- }
- if ( error )
- {
- printk(KERN_ERR "microcode: CPU%d incorrect or corrupt container file\n"
- "microcode: Failed to update patch level. "
- "Current lvl:%#x\n", cpu, sig->rev);
- break;
- }
- }
-
- if ( error )
- {
- /*
- * -ENODATA here means that the blob was parsed fine but no matching
- * ucode was found. Don't return it to the caller.
- */
- if ( error == -ENODATA )
- error = 0;
-
- xfree(mc_amd->equiv_cpu_table);
- xfree(mc_amd);
- goto out;
- }
-
- /*
- * It's possible the data file has multiple matching ucode entries;
- * keep searching until the latest version is found.
- */
- while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
- &offset)) == 0 )
- {
- /*
- * If the new ucode covers current CPU, compare ucodes and store the
- * one with higher revision.
- */
- if ( (microcode_fits(mc_amd) != MIS_UCODE) &&
- (!saved || (compare_header(mc_amd->mpb, saved) == NEW_UCODE)) )
- {
- xfree(saved);
- saved = mc_amd->mpb;
- saved_size = mc_amd->mpb_size;
- }
- else
- {
- xfree(mc_amd->mpb);
- mc_amd->mpb = NULL;
- }
-
- if ( offset >= bufsize )
- break;
-
- /*
- * 1. Suppose multiple containers exist and the correct patch lives in a
- * container that is not the last one.
- * 2. We matched the equivalence ID using find_equiv_cpu_id() in the
- * earlier while() loop, i.e. on an earlier container file, and broke
- * out of that loop.
- * 3. We then proceed to this while ( (error = get_ucode_from_buffer_amd(
- * mc_amd, buf, bufsize, &offset)) == 0 ) loop.
- * 4. We find the correct patch using microcode_fits() and apply it
- * (assume apply_microcode() succeeds).
- * 5. The loop from (3) would continue parsing the binary because a
- * subsequent container file exists, but...
- * 6. ...a correct patch can only live in one container, never in a
- * subsequent one (refer to the docs for more info). Therefore we do
- * not have to parse any further container and can stop here.
- * 7. Stopping here also keeps 'error' at its success value (0); otherwise
- * get_ucode_from_buffer_amd() would trip over the next container's
- * leading UCODE_MAGIC, take the mpbuf->type != UCODE_UCODE_TYPE branch
- * and return -EINVAL.
- */
- if ( offset + SECTION_HDR_SIZE <= bufsize &&
- *(const uint32_t *)(buf + offset) == UCODE_MAGIC )
- break;
- }
-
- if ( saved )
- {
- mc_amd->mpb = saved;
- mc_amd->mpb_size = saved_size;
- patch = xmalloc(struct microcode_patch);
- if ( patch )
- patch->mc_amd = mc_amd;
- else
- {
- free_patch(mc_amd);
- error = -ENOMEM;
- }
- }
- else
- free_patch(mc_amd);
-
- out:
- if ( error && !patch )
- patch = ERR_PTR(error);
-
- return patch;
-}
-
-#ifdef CONFIG_HVM
-static int start_update(void)
-{
- /*
- * svm_host_osvw_init() will be called on each cpu via the
- * '.end_update_percpu' hook in common code.
- */
- svm_host_osvw_reset();
-
- return 0;
-}
-#endif
-
-static const struct microcode_ops microcode_amd_ops = {
- .cpu_request_microcode = cpu_request_microcode,
- .collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
-#ifdef CONFIG_HVM
- .start_update = start_update,
- .end_update_percpu = svm_host_osvw_init,
-#endif
- .free_patch = free_patch,
- .compare_patch = compare_patch,
- .match_cpu = match_cpu,
-};
-
-int __init microcode_init_amd(void)
-{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- microcode_ops = µcode_amd_ops;
- return 0;
-}
+++ /dev/null
-/*
- * Intel CPU Microcode Update Driver for Linux
- *
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
- * 2006 Shaohua Li <shaohua.li@intel.com>
- *
- * This driver allows upgrading microcode on Intel processors
- * belonging to IA-32 family - PentiumPro, Pentium II,
- * Pentium III, Xeon, Pentium 4, etc.
- *
- * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
- * Software Developer's Manual
- * Order Number 253668 or free download from:
- *
- * http://developer.intel.com/design/pentium4/manuals/253668.htm
- *
- * For more information, go to http://www.urbanmyth.org/microcode
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <xen/err.h>
-#include <xen/init.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/sched.h>
-#include <xen/smp.h>
-#include <xen/spinlock.h>
-
-#include <asm/msr.h>
-#include <asm/processor.h>
-#include <asm/microcode.h>
-
-#define pr_debug(x...) ((void)0)
-
-struct microcode_header_intel {
- unsigned int hdrver;
- unsigned int rev;
- union {
- struct {
- uint16_t year;
- uint8_t day;
- uint8_t month;
- };
- unsigned int date;
- };
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int datasize;
- unsigned int totalsize;
- unsigned int reserved[3];
-};
-
-struct microcode_intel {
- struct microcode_header_intel hdr;
- unsigned int bits[0];
-};
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
- unsigned int sig;
- unsigned int pf;
- unsigned int cksum;
-};
-
-struct extended_sigtable {
- unsigned int count;
- unsigned int cksum;
- unsigned int reserved[3];
- struct extended_signature sigs[0];
-};
-
-#define DEFAULT_UCODE_DATASIZE (2000)
-#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
-#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
-#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
-#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
-#define DWSIZE (sizeof(u32))
-#define get_totalsize(mc) \
- (((struct microcode_intel *)mc)->hdr.totalsize ? \
- ((struct microcode_intel *)mc)->hdr.totalsize : \
- DEFAULT_UCODE_TOTALSIZE)
-
-#define get_datasize(mc) \
- (((struct microcode_intel *)mc)->hdr.datasize ? \
- ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
-
-#define sigmatch(s1, s2, p1, p2) \
- (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
-
-#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
-
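
sigmatch() above accepts an update when the signatures are identical and either the platform-flag masks share at least one bit or both masks are zero; collect_cpu_info() below derives the CPU's one-hot flag from MSR_IA32_PLATFORM_ID bits 52:50. A small self-contained sketch of that behaviour, with made-up signature/flag values and the macro copied from above:

#include <assert.h>
#include <stdint.h>

#define sigmatch(s1, s2, p1, p2) \
    (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))

static void sigmatch_example(void)
{
    uint32_t cpu_sig = 0x000506e3;     /* hypothetical CPUID.1 signature */
    uint32_t cpu_pf  = 1u << 3;        /* hypothetical platform-ID bit   */

    assert(sigmatch(cpu_sig, 0x000506e3, cpu_pf, 0x0c));  /* flags overlap   */
    assert(!sigmatch(cpu_sig, 0x000506e3, cpu_pf, 0x02)); /* no flag overlap */
    assert(!sigmatch(cpu_sig, 0x000406e3, cpu_pf, 0x0c)); /* wrong signature */
}
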
-static int collect_cpu_info(struct cpu_signature *csig)
-{
- unsigned int cpu_num = smp_processor_id();
- struct cpuinfo_x86 *c = &cpu_data[cpu_num];
- uint64_t msr_content;
-
- memset(csig, 0, sizeof(*csig));
-
- if ( (c->x86_vendor != X86_VENDOR_INTEL) || (c->x86 < 6) )
- {
- printk(KERN_ERR "microcode: CPU%d not a capable Intel "
- "processor\n", cpu_num);
- return -1;
- }
-
- csig->sig = cpuid_eax(0x00000001);
-
- if ( (c->x86_model >= 5) || (c->x86 > 6) )
- {
- /* get processor flags from MSR 0x17 */
- rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
- csig->pf = 1 << ((msr_content >> 50) & 7);
- }
-
- wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_eax(1);
-
- /* get the current revision from MSR 0x8B */
- rdmsrl(MSR_IA32_UCODE_REV, msr_content);
- csig->rev = (uint32_t)(msr_content >> 32);
- pr_debug("microcode: collect_cpu_info : sig=%#x, pf=%#x, rev=%#x\n",
- csig->sig, csig->pf, csig->rev);
-
- return 0;
-}
-
-static int microcode_sanity_check(const void *mc)
-{
- const struct microcode_header_intel *mc_header = mc;
- const struct extended_sigtable *ext_header = NULL;
- const struct extended_signature *ext_sig;
- unsigned long total_size, data_size, ext_table_size;
- unsigned int ext_sigcount = 0, i;
- uint32_t sum, orig_sum;
-
- total_size = get_totalsize(mc_header);
- data_size = get_datasize(mc_header);
- if ( (data_size + MC_HEADER_SIZE) > total_size )
- {
- printk(KERN_ERR "microcode: error! "
- "Bad data size in microcode data file\n");
- return -EINVAL;
- }
-
- if ( (mc_header->ldrver != 1) || (mc_header->hdrver != 1) )
- {
- printk(KERN_ERR "microcode: error! "
- "Unknown microcode update format\n");
- return -EINVAL;
- }
- ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
- if ( ext_table_size )
- {
- if ( (ext_table_size < EXT_HEADER_SIZE) ||
- ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE) )
- {
- printk(KERN_ERR "microcode: error! "
- "Small exttable size in microcode data file\n");
- return -EINVAL;
- }
- ext_header = mc + MC_HEADER_SIZE + data_size;
- if ( ext_table_size != exttable_size(ext_header) )
- {
- printk(KERN_ERR "microcode: error! "
- "Bad exttable size in microcode data file\n");
- return -EFAULT;
- }
- ext_sigcount = ext_header->count;
- }
-
- /* check extended table checksum */
- if ( ext_table_size )
- {
- uint32_t ext_table_sum = 0;
- uint32_t *ext_tablep = (uint32_t *)ext_header;
-
- i = ext_table_size / DWSIZE;
- while ( i-- )
- ext_table_sum += ext_tablep[i];
- if ( ext_table_sum )
- {
- printk(KERN_WARNING "microcode: aborting, "
- "bad extended signature table checksum\n");
- return -EINVAL;
- }
- }
-
- /* calculate the checksum */
- orig_sum = 0;
- i = (MC_HEADER_SIZE + data_size) / DWSIZE;
- while ( i-- )
- orig_sum += ((uint32_t *)mc)[i];
- if ( orig_sum )
- {
- printk(KERN_ERR "microcode: aborting, bad checksum\n");
- return -EINVAL;
- }
- if ( !ext_table_size )
- return 0;
- /* check extended signature checksum */
- for ( i = 0; i < ext_sigcount; i++ )
- {
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
- EXT_SIGNATURE_SIZE * i;
- sum = orig_sum
- - (mc_header->sig + mc_header->pf + mc_header->cksum)
- + (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
- if ( sum )
- {
- printk(KERN_ERR "microcode: aborting, bad checksum\n");
- return -EINVAL;
- }
- }
- return 0;
-}
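
The checksum scheme microcode_sanity_check() enforces is simply that the 32-bit words of the header plus data area sum to zero modulo 2^32; the extended-signature loop reuses the same sum after swapping the base (sig, pf, cksum) triple for an extended one. A minimal standalone sketch of the base property (the buffer and length here are hypothetical, and len is assumed to be a multiple of four):

#include <stddef.h>
#include <stdint.h>

/* Returns 1 if the header+data words of a candidate update sum to zero. */
static int ucode_words_sum_to_zero(const void *blob, size_t len)
{
    const uint32_t *w = blob;
    uint32_t sum = 0;
    size_t i;

    for ( i = 0; i < len / sizeof(uint32_t); i++ )
        sum += w[i];

    return sum == 0;
}
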
-
-/* Check an update against the CPU signature and current update revision */
-static enum microcode_match_result microcode_update_match(
- const struct microcode_header_intel *mc_header)
-{
- const struct extended_sigtable *ext_header;
- const struct extended_signature *ext_sig;
- unsigned int i;
- struct cpu_signature *cpu_sig = &this_cpu(cpu_sig);
- unsigned int sig = cpu_sig->sig;
- unsigned int pf = cpu_sig->pf;
- unsigned int rev = cpu_sig->rev;
- unsigned long data_size = get_datasize(mc_header);
- const void *end = (const void *)mc_header + get_totalsize(mc_header);
-
- ASSERT(!microcode_sanity_check(mc_header));
- if ( sigmatch(sig, mc_header->sig, pf, mc_header->pf) )
- return (mc_header->rev > rev) ? NEW_UCODE : OLD_UCODE;
-
- ext_header = (const void *)(mc_header + 1) + data_size;
- ext_sig = (const void *)(ext_header + 1);
-
- /*
- * Make sure there is enough space to hold an extended header and enough
- * array elements.
- */
- if ( end <= (const void *)ext_sig )
- return MIS_UCODE;
-
- for ( i = 0; i < ext_header->count; i++ )
- if ( sigmatch(sig, ext_sig[i].sig, pf, ext_sig[i].pf) )
- return (mc_header->rev > rev) ? NEW_UCODE : OLD_UCODE;
-
- return MIS_UCODE;
-}
-
-static bool match_cpu(const struct microcode_patch *patch)
-{
- if ( !patch )
- return false;
-
- return microcode_update_match(&patch->mc_intel->hdr) == NEW_UCODE;
-}
-
-static void free_patch(void *mc)
-{
- xfree(mc);
-}
-
-static enum microcode_match_result compare_patch(
- const struct microcode_patch *new, const struct microcode_patch *old)
-{
- /*
- * Both patches to compare are supposed to be applicable to local CPU.
- * Just compare the revision number.
- */
- ASSERT(microcode_update_match(&old->mc_intel->hdr) != MIS_UCODE);
- ASSERT(microcode_update_match(&new->mc_intel->hdr) != MIS_UCODE);
-
- return (new->mc_intel->hdr.rev > old->mc_intel->hdr.rev) ? NEW_UCODE
- : OLD_UCODE;
-}
-
-static int apply_microcode(const struct microcode_patch *patch)
-{
- uint64_t msr_content;
- unsigned int val[2];
- unsigned int cpu_num = raw_smp_processor_id();
- struct cpu_signature *sig = &this_cpu(cpu_sig);
- const struct microcode_intel *mc_intel;
-
- if ( !patch )
- return -ENOENT;
-
- if ( !match_cpu(patch) )
- return -EINVAL;
-
- mc_intel = patch->mc_intel;
-
- BUG_ON(local_irq_is_enabled());
-
- /* write microcode via MSR 0x79 */
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc_intel->bits);
- wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_eax(1);
-
- /* get the current revision from MSR 0x8B */
- rdmsrl(MSR_IA32_UCODE_REV, msr_content);
- val[1] = (uint32_t)(msr_content >> 32);
-
- if ( val[1] != mc_intel->hdr.rev )
- {
- printk(KERN_ERR "microcode: CPU%d update from revision "
- "%#x to %#x failed. Resulting revision is %#x.\n", cpu_num,
- sig->rev, mc_intel->hdr.rev, val[1]);
- return -EIO;
- }
- printk(KERN_INFO "microcode: CPU%d updated from revision "
- "%#x to %#x, date = %04x-%02x-%02x\n",
- cpu_num, sig->rev, val[1], mc_intel->hdr.year,
- mc_intel->hdr.month, mc_intel->hdr.day);
- sig->rev = val[1];
-
- return 0;
-}
-
-static long get_next_ucode_from_buffer(struct microcode_intel **mc,
- const uint8_t *buf, unsigned long size,
- unsigned long offset)
-{
- struct microcode_header_intel *mc_header;
- unsigned long total_size;
-
- /* No more data */
- if ( offset >= size )
- return 0;
- mc_header = (struct microcode_header_intel *)(buf + offset);
- total_size = get_totalsize(mc_header);
-
- if ( (offset + total_size) > size )
- {
- printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
- return -EINVAL;
- }
-
- *mc = xmalloc_bytes(total_size);
- if ( *mc == NULL )
- {
- printk(KERN_ERR "microcode: error! Can not allocate memory\n");
- return -ENOMEM;
- }
- memcpy(*mc, (const void *)(buf + offset), total_size);
- return offset + total_size;
-}
-
-static struct microcode_patch *cpu_request_microcode(const void *buf,
- size_t size)
-{
- long offset = 0;
- int error = 0;
- struct microcode_intel *mc, *saved = NULL;
- struct microcode_patch *patch = NULL;
-
- while ( (offset = get_next_ucode_from_buffer(&mc, buf, size, offset)) > 0 )
- {
- error = microcode_sanity_check(mc);
- if ( error )
- {
- xfree(mc);
- break;
- }
-
- /*
- * If the new update covers current CPU, compare updates and store the
- * one with higher revision.
- */
- if ( (microcode_update_match(&mc->hdr) != MIS_UCODE) &&
- (!saved || (mc->hdr.rev > saved->hdr.rev)) )
- {
- xfree(saved);
- saved = mc;
- }
- else
- xfree(mc);
- }
- if ( offset < 0 )
- error = offset;
-
- if ( saved )
- {
- patch = xmalloc(struct microcode_patch);
- if ( patch )
- patch->mc_intel = saved;
- else
- {
- xfree(saved);
- error = -ENOMEM;
- }
- }
-
- if ( error && !patch )
- patch = ERR_PTR(error);
-
- return patch;
-}
-
-static const struct microcode_ops microcode_intel_ops = {
- .cpu_request_microcode = cpu_request_microcode,
- .collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
- .free_patch = free_patch,
- .compare_patch = compare_patch,
- .match_cpu = match_cpu,
-};
-
-int __init microcode_init_intel(void)
-{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- microcode_ops = µcode_intel_ops;
- return 0;
-}
#include <xen/percpu.h>
-enum microcode_match_result {
- OLD_UCODE, /* signature matched, but revision id is older or equal */
- NEW_UCODE, /* signature matched, but revision id is newer */
- MIS_UCODE, /* signature mismatched */
-};
-
-struct cpu_signature;
-
-struct microcode_patch {
- union {
- struct microcode_intel *mc_intel;
- struct microcode_amd *mc_amd;
- void *mc;
- };
-};
-
-struct microcode_ops {
- struct microcode_patch *(*cpu_request_microcode)(const void *buf,
- size_t size);
- int (*collect_cpu_info)(struct cpu_signature *csig);
- int (*apply_microcode)(const struct microcode_patch *patch);
- int (*start_update)(void);
- void (*end_update_percpu)(void);
- void (*free_patch)(void *mc);
- bool (*match_cpu)(const struct microcode_patch *patch);
- enum microcode_match_result (*compare_patch)(
- const struct microcode_patch *new, const struct microcode_patch *old);
-};
-
struct cpu_signature {
 unsigned int sig;
 unsigned int pf;
 unsigned int rev;
};
DECLARE_PER_CPU(struct cpu_signature, cpu_sig);
-extern const struct microcode_ops *microcode_ops;
#endif /* ASM_X86__MICROCODE_H */