From: Anthony PERARD Date: Wed, 15 Dec 2021 09:14:13 +0000 (+0100) Subject: xen: move include/asm-* to arch/*/include/asm X-Git-Tag: archive/raspbian/4.17.0-1+rpi1^2~33^2~1223 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=725381a5eab35227ef0099a43e05034def42bb77;p=xen.git xen: move include/asm-* to arch/*/include/asm This avoids the need to create the symbolic link "include/asm". Whenever a comment refers to an "asm" header, this patch avoids spelling out the arch when not needed, to avoid some code churn. One unrelated change is to sort entries in MAINTAINERS for "INTEL(R) VT FOR X86 (VT-X)". Signed-off-by: Anthony PERARD Acked-by: Paul Durrant Acked-by: Jan Beulich Acked-by: Julien Grall Acked-by: Andrew Cooper --- diff --git a/.gitignore b/.gitignore index 111eb03b86..e13cbf84b2 100644 --- a/.gitignore +++ b/.gitignore @@ -314,6 +314,7 @@ xen/arch/x86/boot/*.lnk xen/arch/x86/efi.lds xen/arch/x86/efi/check.efi xen/arch/x86/efi/mkreloc +xen/arch/x86/include/asm/asm-macros.h xen/arch/*/xen.lds xen/arch/*/efi/boot.c xen/arch/*/efi/compat.c @@ -321,12 +322,10 @@ xen/arch/*/efi/ebmalloc.c xen/arch/*/efi/efi.h xen/arch/*/efi/pe.c xen/arch/*/efi/runtime.c +xen/arch/*/include/asm/asm-offsets.h xen/common/config_data.S xen/common/config.gz xen/include/headers*.chk -xen/include/asm -xen/include/asm-*/asm-offsets.h -xen/include/asm-x86/asm-macros.h xen/include/compat/* xen/include/config/ xen/include/generated/ diff --git a/MAINTAINERS b/MAINTAINERS index 22ea62d964..6e84a05760 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -245,7 +245,6 @@ F: xen/drivers/char/omap-uart.c F: xen/drivers/char/pl011.c F: xen/drivers/char/scif-uart.c F: xen/drivers/passthrough/arm/ -F: xen/include/asm-arm/ F: xen/include/public/arch-arm/ F: xen/include/public/arch-arm.h @@ -290,10 +289,10 @@ EFI M: Jan Beulich S: Supported F: xen/arch/x86/efi/ +F: xen/arch/x86/include/asm/efi*.h +F: xen/arch/x86/include/asm/x86_*/efi*.h F: xen/common/efi/ F: xen/include/efi/ -F: xen/include/asm-x86/efi*.h -F: xen/include/asm-x86/x86_*/efi*.h GDBSX DEBUGGER M: Elena Ufimtseva @@ -319,8 +318,8 @@ F: xen/include/xen/hypfs.h INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) R: Lukasz Hawrylko S: Odd Fixes +F: xen/arch/x86/include/asm/tboot.h F: xen/arch/x86/tboot.c -F: xen/include/asm-x86/tboot.h INTEL(R) VT FOR DIRECTED I/O (VT-D) M: Kevin Tian @@ -331,10 +330,10 @@ INTEL(R) VT FOR X86 (VT-X) M: Jun Nakajima M: Kevin Tian S: Supported +F: xen/arch/x86/cpu/vpmu_intel.c F: xen/arch/x86/hvm/vmx/ +F: xen/arch/x86/include/asm/hvm/vmx/ F: xen/arch/x86/mm/p2m-ept.c -F: xen/include/asm-x86/hvm/vmx/ -F: xen/arch/x86/cpu/vpmu_intel.c IOMMU VENDOR INDEPENDENT CODE M: Jan Beulich @@ -401,10 +400,10 @@ M: Ross Lagerwall S: Supported F: docs/misc/livepatch.pandoc F: tools/misc/xen-livepatch.c +F: xen/arch/*/include/asm/livepatch.h F: xen/arch/*/livepatch* F: xen/arch/*/*/livepatch* F: xen/common/livepatch* -F: xen/include/asm-*/livepatch.h F: xen/include/xen/livepatch* F: xen/test/livepatch/* @@ -473,7 +472,6 @@ R: Connor Davis S: Supported F: config/riscv64.mk F: xen/arch/riscv/ -F: xen/include/asm-riscv/ RTDS SCHEDULER M: Dario Faggioli @@ -502,8 +500,8 @@ F: stubdom/ TEE MEDIATORS M: Volodymyr Babchuk S: Supported +F: xen/arch/arm/include/asm/tee F: xen/arch/arm/tee/ -F: xen/include/asm-arm/tee TOOLSTACK M: Wei Liu @@ -531,6 +529,8 @@ F: tools/misc/xen-access.c F: xen/arch/*/monitor.c F: xen/arch/*/vm_event.c F: xen/arch/arm/mem_access.c +F: xen/arch/x86/include/asm/hvm/monitor.h +F: xen/arch/x86/include/asm/hvm/vm_event.h F: 
xen/arch/x86/mm/mem_access.c F: xen/arch/x86/hvm/monitor.c F: xen/arch/x86/hvm/vm_event.c @@ -540,8 +540,6 @@ F: xen/common/vm_event.c F: xen/include/*/mem_access.h F: xen/include/*/monitor.h F: xen/include/*/vm_event.h -F: xen/include/asm-x86/hvm/monitor.h -F: xen/include/asm-x86/hvm/vm_event.h VPCI M: Roger Pau Monné @@ -567,7 +565,6 @@ R: Wei Liu S: Supported L: xen-devel@lists.xenproject.org F: xen/arch/x86/ -F: xen/include/asm-x86/ F: xen/include/public/arch-x86/ F: xen/include/xen/lib/x86 F: xen/lib/x86 @@ -587,10 +584,10 @@ F: xen/arch/x86/hvm/emulate.c F: xen/arch/x86/hvm/intercept.c F: xen/arch/x86/hvm/io.c F: xen/arch/x86/hvm/ioreq.c -F: xen/include/asm-x86/hvm/emulate.h -F: xen/include/asm-x86/hvm/io.h -F: xen/include/asm-x86/hvm/ioreq.h -F: xen/include/asm-x86/ioreq.h +F: xen/arch/x86/include/asm/hvm/emulate.h +F: xen/arch/x86/include/asm/hvm/io.h +F: xen/arch/x86/include/asm/hvm/ioreq.h +F: xen/arch/x86/include/asm/ioreq.h X86 MEMORY MANAGEMENT M: Jan Beulich @@ -622,10 +619,10 @@ M: Wei Liu S: Supported F: xen/arch/x86/guest/hyperv/ F: xen/arch/x86/hvm/viridian/ -F: xen/include/asm-x86/guest/hyperv.h -F: xen/include/asm-x86/guest/hyperv-hcall.h -F: xen/include/asm-x86/guest/hyperv-tlfs.h -F: xen/include/asm-x86/hvm/viridian.h +F: xen/arch/x86/include/asm/guest/hyperv.h +F: xen/arch/x86/include/asm/guest/hyperv-hcall.h +F: xen/arch/x86/include/asm/guest/hyperv-tlfs.h +F: xen/arch/x86/include/asm/hvm/viridian.h XENSTORE M: Wei Liu diff --git a/tools/include/Makefile b/tools/include/Makefile index 42605d46b9..d7b51006e0 100644 --- a/tools/include/Makefile +++ b/tools/include/Makefile @@ -30,7 +30,7 @@ xen-dir: ln -s $(XEN_ROOT)/xen/include/acpi/platform acpi/ ln -s $(XEN_ROOT)/xen/include/acpi/ac*.h acpi/ ifeq ($(CONFIG_X86),y) - ln -s $(XEN_ROOT)/xen/include/asm-x86 xen/asm + ln -s $(XEN_ROOT)/xen/arch/x86/include/asm xen/ mkdir -p xen/lib/x86 ln -s $(filter-out %autogen.h,$(wildcard $(XEN_ROOT)/xen/include/xen/lib/x86/*.h)) xen/lib/x86/ ln -s $(XEN_ROOT)/xen/include/xen/lib/x86/Makefile xen/lib/x86/ diff --git a/tools/misc/xen-access.c b/tools/misc/xen-access.c index 4bbef0bd2e..0731c20b83 100644 --- a/tools/misc/xen-access.c +++ b/tools/misc/xen-access.c @@ -56,11 +56,11 @@ #define ERROR(a, b...) fprintf(stderr, a "\n", ## b) #define PERROR(a, b...) fprintf(stderr, a ": %s\n", ## b, strerror(errno)) -/* From xen/include/asm-x86/processor.h */ +/* From xen/arch/x86/include/asm/processor.h */ #define X86_TRAP_DEBUG 1 #define X86_TRAP_INT3 3 -/* From xen/include/asm-x86/x86-defns.h */ +/* From xen/arch/x86/include/asm/x86-defns.h */ #define X86_CR4_PGE 0x00000080 /* enable global pages */ typedef struct vm_event { diff --git a/tools/tests/vhpet/Makefile b/tools/tests/vhpet/Makefile index cb88dd01c5..2d56ffdfd9 100644 --- a/tools/tests/vhpet/Makefile +++ b/tools/tests/vhpet/Makefile @@ -32,7 +32,7 @@ distclean: clean .PHONY: install install: -hpet.h: $(XEN_ROOT)/xen/include/asm-x86/hpet.h +hpet.h: $(XEN_ROOT)/xen/arch/x86/include/asm/hpet.h cp $< $@ hpet.c: $(XEN_ROOT)/xen/arch/x86/hvm/hpet.c diff --git a/xen/Makefile b/xen/Makefile index 1fd48af7ae..b2a63edca1 100644 --- a/xen/Makefile +++ b/xen/Makefile @@ -166,7 +166,7 @@ ifeq ($(TARGET_ARCH),x86) t1 = $(call as-insn,$(CC),".L0: .L1: .skip (.L1 - .L0)",,-no-integrated-as) # Check whether clang asm()-s support .include. 
-t2 = $(call as-insn,$(CC) -I$(BASEDIR)/include,".include \"asm-x86/asm-defns.h\"",,-no-integrated-as) +t2 = $(call as-insn,$(CC) -I$(BASEDIR)/arch/x86/include,".include \"asm/asm-defns.h\"",,-no-integrated-as) # Check whether clang keeps .macro-s between asm()-s: # https://bugs.llvm.org/show_bug.cgi?id=36110 @@ -382,7 +382,7 @@ _clean: delete-unfresh-files -o -name ".*.o.tmp" -o -name "*~" -o -name "core" \ -o -name "*.gcno" -o -name ".*.cmd" -o -name "lib.a" \) -exec rm -f {} \; rm -f include/asm $(TARGET) $(TARGET).gz $(TARGET).efi $(TARGET).efi.map $(TARGET)-syms $(TARGET)-syms.map *~ core - rm -f asm-offsets.s include/asm-*/asm-offsets.h + rm -f asm-offsets.s arch/*/include/asm/asm-offsets.h rm -f .banner .allconfig.tmp .PHONY: _distclean @@ -396,7 +396,6 @@ $(TARGET).gz: $(TARGET) $(TARGET): delete-unfresh-files $(MAKE) -C tools $(MAKE) -f $(BASEDIR)/Rules.mk include/xen/compile.h - [ -e include/asm ] || ln -sf asm-$(TARGET_ARCH) include/asm [ -e arch/$(TARGET_ARCH)/efi ] && for f in $$(cd common/efi; echo *.[ch]); \ do test -r arch/$(TARGET_ARCH)/efi/$$f || \ ln -nsf ../../../common/efi/$$f arch/$(TARGET_ARCH)/efi/; \ @@ -404,7 +403,7 @@ $(TARGET): delete-unfresh-files true $(MAKE) -f $(BASEDIR)/Rules.mk -C include $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) include - $(MAKE) -f $(BASEDIR)/Rules.mk include/asm-$(TARGET_ARCH)/asm-offsets.h + $(MAKE) -f $(BASEDIR)/Rules.mk arch/$(TARGET_ARCH)/include/asm/asm-offsets.h $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) $@ # drivers/char/console.o contains static banner/compile info. Blow it away. @@ -450,7 +449,7 @@ asm-offsets.s: arch/$(TARGET_ARCH)/$(TARGET_SUBARCH)/asm-offsets.c $(CC) $(call cpp_flags,$(c_flags)) -S -g0 -o $@.new -MQ $@ $< $(call move-if-changed,$@.new,$@) -include/asm-$(TARGET_ARCH)/asm-offsets.h: asm-offsets.s +arch/$(TARGET_ARCH)/include/asm/asm-offsets.h: asm-offsets.s @(set -e; \ echo "/*"; \ echo " * DO NOT MODIFY."; \ @@ -468,8 +467,8 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h: asm-offsets.s SUBDIRS = xsm arch/$(TARGET_ARCH) common drivers lib test define all_sources - ( find include/asm-$(TARGET_ARCH) -name '*.h' -print; \ - find include -name 'asm-*' -prune -o -name '*.h' -print; \ + ( find arch/$(TARGET_ARCH)/include -name '*.h' -print; \ + find include -name '*.h' -print; \ find $(SUBDIRS) -name '*.[chS]' -print ) endef diff --git a/xen/arch/arm/README.LinuxPrimitives b/xen/arch/arm/README.LinuxPrimitives index 664a9f89ed..1d53e6a898 100644 --- a/xen/arch/arm/README.LinuxPrimitives +++ b/xen/arch/arm/README.LinuxPrimitives @@ -8,19 +8,19 @@ arm64: bitops: last sync @ v3.16-rc6 (last commit: 8715466b6027) -linux/arch/arm64/include/asm/bitops.h xen/include/asm-arm/arm64/bitops.h +linux/arch/arm64/include/asm/bitops.h xen/arch/arm/include/asm/arm64/bitops.h --------------------------------------------------------------------- cmpxchg: last sync @ v3.16-rc6 (last commit: e1dfda9ced9b) -linux/arch/arm64/include/asm/cmpxchg.h xen/include/asm-arm/arm64/cmpxchg.h +linux/arch/arm64/include/asm/cmpxchg.h xen/arch/arm/include/asm/arm64/cmpxchg.h --------------------------------------------------------------------- atomics: last sync @ v3.16-rc6 (last commit: 8715466b6027) -linux/arch/arm64/include/asm/atomic.h xen/include/asm-arm/arm64/atomic.h +linux/arch/arm64/include/asm/atomic.h xen/arch/arm/include/asm/arm64/atomic.h The following functions were taken from Linux: atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(), @@ -76,13 +76,13 @@ diff -u ../linux/arch/arm/lib/findbit.S 
xen/arch/arm/arm32/lib/findbit.S cmpxchg: last sync @ v3.16-rc6 (last commit: c32ffce0f66e) -linux/arch/arm/include/asm/cmpxchg.h xen/include/asm-arm/arm32/cmpxchg.h +linux/arch/arm/include/asm/cmpxchg.h xen/arch/arm/include/asm/arm32/cmpxchg.h --------------------------------------------------------------------- atomics: last sync @ v3.16-rc6 (last commit: 030d0178bdbd) -linux/arch/arm/include/asm/atomic.h xen/include/asm-arm/arm32/atomic.h +linux/arch/arm/include/asm/atomic.h xen/arch/arm/include/asm/arm32/atomic.h The following functions were taken from Linux: atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(), diff --git a/xen/arch/arm/arch.mk b/xen/arch/arm/arch.mk index 6a29820594..c3ac443b37 100644 --- a/xen/arch/arm/arch.mk +++ b/xen/arch/arm/arch.mk @@ -2,6 +2,7 @@ # arm-specific definitions CFLAGS += -I$(BASEDIR)/include +CFLAGS += -I$(BASEDIR)/arch/$(TARGET_ARCH)/include $(call cc-options-add,CFLAGS,CC,$(EMBEDDED_EXTRA_CFLAGS)) $(call cc-option-add,CFLAGS,CC,-Wnested-externs) diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S index 7178865f48..b5912d381b 100644 --- a/xen/arch/arm/arm32/head.S +++ b/xen/arch/arm/arm32/head.S @@ -599,7 +599,7 @@ remove_identity_mapping: strd r2, r3, [r0, r1] identity_mapping_removed: - /* See asm-arm/arm32/flushtlb.h for the explanation of the sequence. */ + /* See asm/arm32/flushtlb.h for the explanation of the sequence. */ dsb nshst mcr CP32(r0, TLBIALLH) dsb nsh diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S index aa1f88c764..51b00ab0be 100644 --- a/xen/arch/arm/arm64/head.S +++ b/xen/arch/arm/arm64/head.S @@ -737,7 +737,7 @@ remove_identity_mapping: str xzr, [x0, x1, lsl #3] identity_mapping_removed: - /* See asm-arm/arm64/flushtlb.h for the explanation of the sequence. */ + /* See asm/arm64/flushtlb.h for the explanation of the sequence. */ dsb nshst tlbi alle2 dsb nsh diff --git a/xen/arch/arm/include/asm/acpi.h b/xen/arch/arm/include/asm/acpi.h new file mode 100644 index 0000000000..e53973e054 --- /dev/null +++ b/xen/arch/arm/include/asm/acpi.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2015, Shannon Zhao + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _ASM_ARM_ACPI_H +#define _ASM_ARM_ACPI_H + +#include + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long +#define ACPI_MAP_MEM_ATTR PAGE_HYPERVISOR + +/* Tables marked as reserved in efi table */ +typedef enum { + TBL_FADT, + TBL_MADT, + TBL_STAO, + TBL_XSDT, + TBL_RSDP, + TBL_EFIT, + TBL_MMAP, + TBL_MMAX, +} EFI_MEM_RES; + +bool acpi_psci_present(void); +bool acpi_psci_hvc_present(void); +void acpi_smp_init_cpus(void); + +/* + * This function returns the offset of a given ACPI/EFI table in the allocated + * memory region. 
Currently, the tables should be created in the same order as + * their associated 'index' in the enum EFI_MEM_RES. This means the function + * won't return the correct offset until all the tables before a given 'index' + * are created. + */ +paddr_t acpi_get_table_offset(struct membank tbl_add[], EFI_MEM_RES index); + +/* Macros for consistency checks of the GICC subtable of MADT */ +#define ACPI_MADT_GICC_LENGTH \ + (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) + +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ + (entry)->header.length != ACPI_MADT_GICC_LENGTH) + +#ifdef CONFIG_ACPI +extern bool acpi_disabled; +/* Basic configuration for ACPI */ +static inline void disable_acpi(void) +{ + acpi_disabled = true; +} + +static inline void enable_acpi(void) +{ + acpi_disabled = false; +} +#else +#define acpi_disabled (true) +#define disable_acpi() +#define enable_acpi() +#endif + +#endif /*_ASM_ARM_ACPI_H*/ diff --git a/xen/arch/arm/include/asm/alternative.h b/xen/arch/arm/include/asm/alternative.h new file mode 100644 index 0000000000..1eb4b60fbb --- /dev/null +++ b/xen/arch/arm/include/asm/alternative.h @@ -0,0 +1,221 @@ +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#include +#include + +#define ARM_CB_PATCH ARM_NCAPS + +#ifndef __ASSEMBLY__ + +#include +#include + +struct alt_instr { + s32 orig_offset; /* offset to original instruction */ + s32 alt_offset; /* offset to replacement instruction */ + u16 cpufeature; /* cpufeature bit set for replacement */ + u8 orig_len; /* size of original instruction(s) */ + u8 alt_len; /* size of new instruction(s), <= orig_len */ +}; + +/* Xen: helpers used by common code. */ +#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f) +#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) +#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) + +typedef void (*alternative_cb_t)(const struct alt_instr *alt, + const uint32_t *origptr, uint32_t *updptr, + int nr_inst); + +void apply_alternatives_all(void); +int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end); + +#define ALTINSTR_ENTRY(feature, cb) \ + " .word 661b - .\n" /* label */ \ + " .if " __stringify(cb) " == 0\n" \ + " .word 663f - .\n" /* new instruction */ \ + " .else\n" \ + " .word " __stringify(cb) "- .\n" /* callback */ \ + " .endif\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +/* + * alternative assembly primitive: + * + * If any of these .org directive fail, it means that insn1 and insn2 + * don't have the same length. This used to be written as + * + * .if ((664b-663b) != (662b-661b)) + * .error "Alternatives instruction length mismatch" + * .endif + * + * but most assemblers die if insn1 or insn2 have a .inst. This should + * be fixed in a binutils release posterior to 2.25.51.0.2 (anything + * containing commit 4e4d08cf7399b606 or c1baaddf8861). + * + * Alternatives with callbacks do not generate replacement instructions. + */ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature,cb) \ + ".popsection\n" \ + " .if " __stringify(cb) " == 0\n" \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + "663:\n\t" \ + newinstr "\n" \ + "664:\n\t" \ + ".popsection\n\t" \ + ".org . - (664b-663b) + (662b-661b)\n\t" \ + ".org . 
- (662b-661b) + (664b-663b)\n" \ + ".else\n\t" \ + "663:\n\t" \ + "664:\n\t" \ + ".endif\n" \ + ".endif\n" + +#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + +#define ALTERNATIVE_CB(oldinstr, cb) \ + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM_CB_PATCH, 1, cb) +#else + +#include +#include + +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len + .word \orig_offset - . + .word \alt_offset - . + .hword \feature + .byte \orig_len + .byte \alt_len +.endm + +.macro alternative_insn insn1, insn2, cap, enable = 1 + .if \enable +661: \insn1 +662: .pushsection .altinstructions, "a" + altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f + .popsection + .pushsection .altinstr_replacement, "ax" +663: \insn2 +664: .popsection + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) + .endif +.endm + +/* + * Alternative sequences + * + * The code for the case where the capability is not present will be + * assembled and linked as normal. There are no restrictions on this + * code. + * + * The code for the case where the capability is present will be + * assembled into a special section to be used for dynamic patching. + * Code for that case must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. + * + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ + +/* + * Begin an alternative code sequence. + */ +.macro alternative_if_not cap + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f + .popsection +661: +.endm + +.macro alternative_if cap + .set .Lasm_alt_mode, 1 + .pushsection .altinstructions, "a" + altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f + .popsection + .pushsection .altinstr_replacement, "ax" + .align 2 /* So GAS knows label 661 is suitably aligned */ +661: +.endm + +/* + * Provide the other half of the alternative code sequence. + */ +.macro alternative_else +662: + .if .Lasm_alt_mode==0 + .pushsection .altinstr_replacement, "ax" + .else + .popsection + .endif +663: +.endm + +.macro alternative_cb cb + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, \cb, ARM_CB_PATCH, 662f-661f, 0 + .popsection +661: +.endm + +/* + * Complete an alternative code sequence. + */ +.macro alternative_endif +664: + .if .Lasm_alt_mode==0 + .popsection + .endif + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) +.endm + +/* + * Provides a trivial alternative or default sequence consisting solely + * of NOPs. The number of NOPs is chosen automatically to match the + * previous case. + */ +.macro alternative_else_nop_endif +alternative_else + nops (662b-661b) / ARCH_PATCH_INSN_SIZE +alternative_endif +.endm + +/* + * Callback-based alternative epilogue + */ +.macro alternative_cb_end +662: +.endm + +#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ + alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) + +#endif /* __ASSEMBLY__ */ + +/* + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); + * + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); + * N.B. If CONFIG_FOO is specified, but not selected, the whole block + * will be omitted, including oldinstr. + */ +#define ALTERNATIVE(oldinstr, newinstr, ...) 
\ + _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) + +#endif /* __ASM_ALTERNATIVE_H */ diff --git a/xen/arch/arm/include/asm/altp2m.h b/xen/arch/arm/include/asm/altp2m.h new file mode 100644 index 0000000000..df50cb2f09 --- /dev/null +++ b/xen/arch/arm/include/asm/altp2m.h @@ -0,0 +1,39 @@ +/* + * Alternate p2m + * + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_ARM_ALTP2M_H +#define __ASM_ARM_ALTP2M_H + +#include + +/* Alternate p2m on/off per domain */ +static inline bool altp2m_active(const struct domain *d) +{ + /* Not implemented on ARM. */ + return false; +} + +/* Alternate p2m VCPU */ +static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v) +{ + /* Not implemented on ARM, should not be reached. */ + BUG(); + return 0; +} + +#endif /* __ASM_ARM_ALTP2M_H */ diff --git a/xen/arch/arm/include/asm/arm32/atomic.h b/xen/arch/arm/include/asm/arm32/atomic.h new file mode 100644 index 0000000000..2832a72792 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/atomic.h @@ -0,0 +1,175 @@ +/* + * arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ARCH_ARM_ARM32_ATOMIC__ +#define __ARCH_ARM_ARM32_ATOMIC__ + +/* + * ARMv6 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_add\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__("@ atomic_add_return\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_sub\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__("@ atomic_sub_return\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + unsigned long tmp; + int result; + + prefetchw(&v->counter); + __asm__ __volatile__("@ atomic_and\n" +"1: ldrex %0, [%3]\n" +" and %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (m) + : "cc"); +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + int oldval; + unsigned long res; + + smp_mb(); + prefetchw(&ptr->counter); + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%3]\n" + "mov %0, #0\n" + "teq %1, %4\n" + "strexeq %0, %5, [%3]\n" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int oldval, newval; + unsigned long tmp; + + smp_mb(); + prefetchw(&v->counter); + + __asm__ __volatile__ ("@ atomic_add_unless\n" +"1: ldrex %0, [%4]\n" +" teq %0, %5\n" +" beq 2f\n" +" add %1, %0, %6\n" +" strex %2, %1, [%4]\n" +" teq %2, #0\n" +" bne 1b\n" +"2:" + : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); + + if (oldval != u) + smp_mb(); + + return oldval; +} + +#endif /* __ARCH_ARM_ARM32_ATOMIC__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/bitops.h b/xen/arch/arm/include/asm/arm32/bitops.h new file mode 100644 index 0000000000..57938a5874 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/bitops.h @@ -0,0 +1,42 @@ +#ifndef _ARM_ARM32_BITOPS_H +#define _ARM_ARM32_BITOPS_H + +#define flsl fls + +/* + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 
+ */ +extern int _find_first_zero_bit_le(const void * p, unsigned size); +extern int _find_next_zero_bit_le(const void * p, int size, int offset); +extern int _find_first_bit_le(const unsigned long *p, unsigned size); +extern int _find_next_bit_le(const unsigned long *p, int size, int offset); + +/* + * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. + */ +extern int _find_first_zero_bit_be(const void * p, unsigned size); +extern int _find_next_zero_bit_be(const void * p, int size, int offset); +extern int _find_first_bit_be(const unsigned long *p, unsigned size); +extern int _find_next_bit_be(const unsigned long *p, int size, int offset); + +#ifndef __ARMEB__ +/* + * These are the little endian, atomic definitions. + */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_le(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) + +#else +/* + * These are the big endian, atomic definitions. + */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_be(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) + +#endif + +#endif /* _ARM_ARM32_BITOPS_H */ diff --git a/xen/arch/arm/include/asm/arm32/bug.h b/xen/arch/arm/include/asm/arm32/bug.h new file mode 100644 index 0000000000..25cce151dc --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/bug.h @@ -0,0 +1,15 @@ +#ifndef __ARM_ARM32_BUG_H__ +#define __ARM_ARM32_BUG_H__ + +#include + +/* ARMv7 provides a list of undefined opcode (see A8.8.247 DDI 0406C.b) + * Use one them encoding A1 to go in exception mode + */ +#define BUG_OPCODE 0xe7f000f0 + +#define BUG_INSTR ".word " __stringify(BUG_OPCODE) + +#define BUG_FN_REG r0 + +#endif /* __ARM_ARM32_BUG_H__ */ diff --git a/xen/arch/arm/include/asm/arm32/cmpxchg.h b/xen/arch/arm/include/asm/arm32/cmpxchg.h new file mode 100644 index 0000000000..b0bd1d8b68 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/cmpxchg.h @@ -0,0 +1,229 @@ +#ifndef __ASM_ARM32_CMPXCHG_H +#define __ASM_ARM32_CMPXCHG_H + +#include + +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret; + unsigned int tmp; + + smp_mb(); + prefetchw((const void *)ptr); + + switch (size) { + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ + +extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); + +#define __CMPXCHG_CASE(sz, name) \ +static inline bool __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long *old, \ + unsigned long new, \ + bool timeout, \ + unsigned int max_try) \ +{ \ + unsigned long oldval; \ + unsigned long res; \ + \ + do { \ + asm volatile("@ __cmpxchg_case_" #name "\n" \ + " ldrex" #sz " %1, [%2]\n" \ + " mov %0, #0\n" \ + " teq %1, %3\n" \ + " strex" #sz "eq %0, %4, [%2]\n" \ + : "=&r" (res), "=&r" (oldval) \ + : "r" (ptr), "Ir" (*old), "r" (new) \ + : "memory", "cc"); \ + \ + if (!res) \ + break; \ + } while (!timeout || ((--max_try) > 0)); \ + \ + *old = oldval; \ + \ + return !res; \ +} + +__CMPXCHG_CASE(b, 1) +__CMPXCHG_CASE(h, 2) +__CMPXCHG_CASE( , 4) + +static inline bool __cmpxchg_case_8(volatile uint64_t *ptr, + uint64_t *old, + uint64_t new, + bool timeout, + unsigned int max_try) +{ + uint64_t oldval; + uint64_t res; + + do { + asm volatile( + " ldrexd %1, %H1, [%3]\n" + " teq %1, %4\n" + " teqeq %H1, %H4\n" + " movne %0, #0\n" + " movne %H0, #0\n" + " bne 2f\n" + " strexd %0, %5, %H5, [%3]\n" + "2:" + : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) + : "r" (ptr), "r" (*old), "r" (new) + : "memory", "cc"); + if (!res) + break; + } while (!timeout || ((--max_try) > 0)); + + *old = oldval; + + return !res; +} + +static always_inline bool __int_cmpxchg(volatile void *ptr, unsigned long *old, + unsigned long new, int size, + bool timeout, unsigned int max_try) +{ + prefetchw((const void *)ptr); + + switch (size) { + case 1: + return __cmpxchg_case_1(ptr, old, new, timeout, max_try); + case 2: + return __cmpxchg_case_2(ptr, old, new, timeout, max_try); + case 4: + return __cmpxchg_case_4(ptr, old, new, timeout, max_try); + default: + return __bad_cmpxchg(ptr, size); + } + + ASSERT_UNREACHABLE(); +} + +static always_inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, + int size) +{ + smp_mb(); + if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) + ASSERT_UNREACHABLE(); + smp_mb(); + + return old; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. + */ +static always_inline bool __cmpxchg_timeout(volatile void *ptr, + unsigned long *old, + unsigned long new, + int size, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __int_cmpxchg(ptr, old, new, size, true, max_try); + smp_mb(); + + return ret; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. 
+ */ +static always_inline bool __cmpxchg64_timeout(volatile uint64_t *ptr, + uint64_t *old, + uint64_t new, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __cmpxchg_case_8(ptr, old, new, true, max_try); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline uint64_t cmpxchg64(volatile uint64_t *ptr, + uint64_t old, + uint64_t new) +{ + smp_mb(); + if (!__cmpxchg_case_8(ptr, &old, new, false, 0)) + ASSERT_UNREACHABLE(); + smp_mb(); + + return old; +} + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/flushtlb.h b/xen/arch/arm/include/asm/arm32/flushtlb.h new file mode 100644 index 0000000000..9085e65011 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/flushtlb.h @@ -0,0 +1,63 @@ +#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__ +#define __ASM_ARM_ARM32_FLUSHTLB_H__ + +/* + * Every invalidation operation uses the following pattern: + * + * DSB ISHST // Ensure prior page-table updates have completed + * TLBI... // Invalidate the TLB + * DSB ISH // Ensure the TLB invalidation has completed + * ISB // See explanation below + * + * For Xen page-tables the ISB will discard any instructions fetched + * from the old mappings. + * + * For the Stage-2 page-tables the ISB ensures the completion of the DSB + * (and therefore the TLB invalidation) before continuing. So we know + * the TLBs cannot contain an entry for a mapping we may have removed. + */ +#define TLB_HELPER(name, tlbop) \ +static inline void name(void) \ +{ \ + dsb(ishst); \ + WRITE_CP32(0, tlbop); \ + dsb(ish); \ + isb(); \ +} + +/* Flush local TLBs, current VMID only */ +TLB_HELPER(flush_guest_tlb_local, TLBIALL); + +/* Flush inner shareable TLBs, current VMID only */ +TLB_HELPER(flush_guest_tlb, TLBIALLIS); + +/* Flush local TLBs, all VMIDs, non-hypervisor mode */ +TLB_HELPER(flush_all_guests_tlb_local, TLBIALLNSNH); + +/* Flush inner shareable TLBs, all VMIDs, non-hypervisor mode */ +TLB_HELPER(flush_all_guests_tlb, TLBIALLNSNHIS); + +/* Flush all hypervisor mappings from the TLB of the local processor. */ +TLB_HELPER(flush_xen_tlb_local, TLBIALLH); + +/* Flush TLB of local processor for address va. */ +static inline void __flush_xen_tlb_one_local(vaddr_t va) +{ + asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory"); +} + +/* Flush TLB of all processors in the inner-shareable domain for address va. */ +static inline void __flush_xen_tlb_one(vaddr_t va) +{ + asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory"); +} + +#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/insn.h b/xen/arch/arm/include/asm/arm32/insn.h new file mode 100644 index 0000000000..c800cbfff5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/insn.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ARCH_ARM_ARM32_INSN +#define __ARCH_ARM_ARM32_INSN + +#include + +int32_t aarch32_get_branch_offset(uint32_t insn); +uint32_t aarch32_set_branch_offset(uint32_t insn, int32_t offset); + +/* Wrapper for common code */ +static inline bool insn_is_branch_imm(uint32_t insn) +{ + /* + * Xen only uses the ARM execution state on ARM32 platforms, so the + * Thumb branch instructions (CBZ, CBNZ, TBB and TBH) will never be used + * in Xen. The remaining ARM32 branch instructions are BX, BLX, BL and B. + * BX takes a register as its parameter, so we don't need to rewrite it. + * Hence we only need to check the BLX, BL and B encodings in this function. + * + * From ARM DDI 0406C.c Sections A8.8.18 and A8.8.25, these three branch + * instructions have the following encodings: + * - b cccc1010xxxxxxxxxxxxxxxxxxxxxxxx + * - bl cccc1011xxxxxxxxxxxxxxxxxxxxxxxx + * - blx 1111101Hxxxxxxxxxxxxxxxxxxxxxxxx + * + * The H bit of blx can be 0 or 1, depending on the instruction set of the + * branch target. Regardless, if we mask off the condition bits and + * bit 24 (the H bit of blx), all of the above branch instructions share + * the same value 0x0A000000. + * + * And from ARM DDI 0406C.c Section A5.7, Table A5-23, blx is the only + * unconditional instruction that shares this value with the conditional + * branch instructions. So masking the condition bits will not make any + * other unconditional instruction hit this check. + */ + return ( (insn & 0x0E000000) == 0x0A000000 ); +} + +static inline int32_t insn_get_branch_offset(uint32_t insn) +{ + return aarch32_get_branch_offset(insn); +} + +static inline uint32_t insn_set_branch_offset(uint32_t insn, int32_t offset) +{ + return aarch32_set_branch_offset(insn, offset); +} + +#endif /* !__ARCH_ARM_ARM32_INSN */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/io.h b/xen/arch/arm/include/asm/arm32/io.h new file mode 100644 index 0000000000..73a879e9fb --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/io.h @@ -0,0 +1,96 @@ +/* + * Based on linux arch/arm/include/asm/io.h + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Modifications: + * 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both + * constant addresses and variable addresses. + * 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture + * specific IO header files. + * 27-Mar-1999 PJB Second parameter of memcpy_toio is const.. + * 04-Apr-1999 PJB Added check_signature. 
+ * 12-Dec-1999 RMK More cleanups + * 18-Jun-2000 RMK Removed virt_to_* and friends definitions + * 05-Oct-2004 BJD Moved memory string functions to use void __iomem + */ +#ifndef _ARM_ARM32_IO_H +#define _ARM_ARM32_IO_H + +#include +#include + +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("strb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr) + : "r" (val)); +} + +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("strh %1, %0" + : "+Q" (*(volatile u16 __force *)addr) + : "r" (val)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("str %1, %0" + : "+Qo" (*(volatile u32 __force *)addr) + : "r" (val)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile("ldrb %1, %0" + : "+Qo" (*(volatile u8 __force *)addr), + "=r" (val)); + return val; +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile("ldrh %1, %0" + : "+Q" (*(volatile u16 __force *)addr), + "=r" (val)); + return val; +} + +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile("ldr %1, %0" + : "+Qo" (*(volatile u32 __force *)addr), + "=r" (val)); + return val; +} + +#define __iormb() rmb() +#define __iowmb() wmb() + +#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) +#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ + __raw_readw(c)); __r; }) +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ + __raw_readl(c)); __r; }) + +#define writeb_relaxed(v,c) __raw_writeb(v,c) +#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) +#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) + +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) + +#endif /* _ARM_ARM32_IO_H */ diff --git a/xen/arch/arm/include/asm/arm32/macros.h b/xen/arch/arm/include/asm/arm32/macros.h new file mode 100644 index 0000000000..a4e20aa520 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/macros.h @@ -0,0 +1,8 @@ +#ifndef __ASM_ARM_ARM32_MACROS_H +#define __ASM_ARM_ARM32_MACROS_H + + .macro ret + mov pc, lr + .endm + +#endif /* __ASM_ARM_ARM32_MACROS_H */ diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h new file mode 100644 index 0000000000..68612499bf --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/mm.h @@ -0,0 +1,23 @@ +#ifndef __ARM_ARM32_MM_H__ +#define __ARM_ARM32_MM_H__ + +/* + * Only a limited amount of RAM, called xenheap, is always mapped on ARM32. + * For convenience always return false. 
+ */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + return false; +} + +#endif /* __ARM_ARM32_MM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/page.h b/xen/arch/arm/include/asm/arm32/page.h new file mode 100644 index 0000000000..715a9e4fef --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/page.h @@ -0,0 +1,118 @@ +#ifndef __ARM_ARM32_PAGE_H__ +#define __ARM_ARM32_PAGE_H__ + +#ifndef __ASSEMBLY__ + +/* Write a pagetable entry. + * + * If the table entry is changing a text mapping, it is responsibility + * of the caller to issue an ISB after write_pte. + */ +static inline void write_pte(lpae_t *p, lpae_t pte) +{ + asm volatile ( + /* Ensure any writes have completed with the old mappings. */ + "dsb;" + /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */ + "strd %0, %H0, [%1];" + "dsb;" + : : "r" (pte.bits), "r" (p) : "memory"); +} + +/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */ +#define __invalidate_dcache_one(R) STORE_CP32(R, DCIMVAC) + +/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ +#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC) + +/* Inline ASM to clean and invalidate dcache on register R (may be an + * inline asm operand) */ +#define __clean_and_invalidate_dcache_one(R) STORE_CP32(R, DCCIMVAC) + +/* + * Invalidate all instruction caches in Inner Shareable domain to PoU. + * We also need to flush the branch predictor for ARMv7 as it may be + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). + */ +static inline void invalidate_icache(void) +{ + asm volatile ( + CMD_CP32(ICIALLUIS) /* Flush I-cache. */ + CMD_CP32(BPIALLIS) /* Flush branch predictor. */ + : : : "memory"); + + dsb(ish); /* Ensure completion of the flush I-cache */ + isb(); /* Synchronize fetched instruction stream. */ +} + +/* + * Invalidate all instruction caches on the local processor to PoU. + * We also need to flush the branch predictor for ARMv7 as it may be + * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). + */ +static inline void invalidate_icache_local(void) +{ + asm volatile ( + CMD_CP32(ICIALLU) /* Flush I-cache. */ + CMD_CP32(BPIALL) /* Flush branch predictor. */ + : : : "memory"); + + dsb(nsh); /* Ensure completion of the flush I-cache */ + isb(); /* Synchronize fetched instruction stream. */ +} + +/* Ask the MMU to translate a VA for us */ +static inline uint64_t __va_to_par(vaddr_t va) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + WRITE_CP32(va, ATS1HR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + +/* Ask the MMU to translate a Guest VA for us */ +static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + WRITE_CP32(va, ATS12NSOPW); + else + WRITE_CP32(va, ATS12NSOPR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} +static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + WRITE_CP32(va, ATS1CPW); + else + WRITE_CP32(va, ATS1CPR); + isb(); /* Ensure result is available. 
*/ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_ARM32_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/processor.h b/xen/arch/arm/include/asm/arm32/processor.h new file mode 100644 index 0000000000..4e679f3273 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/processor.h @@ -0,0 +1,69 @@ +#ifndef __ASM_ARM_ARM32_PROCESSOR_H +#define __ASM_ARM_ARM32_PROCESSOR_H + +#define ACTLR_CAXX_SMP (1<<6) + +#ifndef __ASSEMBLY__ +/* On stack VCPU state */ +struct cpu_user_regs +{ + uint32_t r0; + uint32_t r1; + uint32_t r2; + uint32_t r3; + uint32_t r4; + uint32_t r5; + uint32_t r6; + uint32_t r7; + uint32_t r8; + uint32_t r9; + uint32_t r10; + union { + uint32_t r11; + uint32_t fp; + }; + uint32_t r12; + + uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */ + + /* r14 - LR: is the same physical register as LR_usr */ + union { + uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */ + + uint32_t lr_usr; + }; + + union { /* Return IP, pc32 is used to allow code to be common with 64-bit */ + uint32_t pc, pc32; + }; + uint32_t cpsr; /* Return mode */ + uint32_t hsr; /* Exception Syndrome */ + + /* Outer guest frame only from here on... */ + + uint32_t sp_usr; /* LR_usr is the same register as LR, see above */ + + uint32_t sp_irq, lr_irq; + uint32_t sp_svc, lr_svc; + uint32_t sp_abt, lr_abt; + uint32_t sp_und, lr_und; + + uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq; + uint32_t sp_fiq, lr_fiq; + + uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq; + + uint32_t pad1; /* Doubleword-align the user half of the frame */ +}; + +#endif + +#endif /* __ASM_ARM_ARM32_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/sysregs.h b/xen/arch/arm/include/asm/arm32/sysregs.h new file mode 100644 index 0000000000..6841d5de43 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/sysregs.h @@ -0,0 +1,78 @@ +#ifndef __ASM_ARM_ARM32_SYSREGS_H +#define __ASM_ARM_ARM32_SYSREGS_H + +#include +#include + +/* Layout as used in assembly, with src/dest registers mixed in */ +#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 +#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm +#define CP32(r, name...) __CP32(r, name) +#define CP64(r, name...) __CP64(r, name) + +/* Stringified for inline assembly */ +#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";" +#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";" +#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";" +#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";" + +/* Issue a CP operation which takes no argument, + * uses r0 as a placeholder register. */ +#define CMD_CP32(name...) "mcr " __stringify(CP32(r0, name)) ";" + +#ifndef __ASSEMBLY__ + +/* C wrappers */ +#define READ_CP32(name...) ({ \ + register uint32_t _r; \ + asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP32(v, name...) do { \ + register uint32_t _r = (v); \ + asm volatile(STORE_CP32(0, name) : : "r" (_r)); \ +} while (0) + +#define READ_CP64(name...) 
({ \ + register uint64_t _r; \ + asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP64(v, name...) do { \ + register uint64_t _r = (v); \ + asm volatile(STORE_CP64(0, name) : : "r" (_r)); \ +} while (0) + +/* + * C wrappers for accessing system registers. + * + * Registers come in 3 types: + * - those which are always 32-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG32). + * - those which are always 64-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG64). + * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG). + */ +#define READ_SYSREG32(R...) READ_CP32(R) +#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R) + +#define READ_SYSREG64(R...) READ_CP64(R) +#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R) + +#define READ_SYSREG(R...) READ_SYSREG32(R) +#define WRITE_SYSREG(V, R...) WRITE_SYSREG32(V, R) + +/* MVFR2 is not defined on ARMv7 */ +#define MVFR2_MAYBE_UNDEFINED + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_ARM32_SYSREGS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/system.h b/xen/arch/arm/include/asm/arm32/system.h new file mode 100644 index 0000000000..ab57abfbc5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/system.h @@ -0,0 +1,77 @@ +/* Portions taken from Linux arch arm */ +#ifndef __ASM_ARM32_SYSTEM_H +#define __ASM_ARM32_SYSTEM_H + +#include + +#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) +#define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) + +#define local_save_flags(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( "mrs %0, cpsr @ local_save_flags\n" \ + : "=r" (x) :: "memory", "cc" ); \ +}) +#define local_irq_save(x) \ +({ \ + local_save_flags(x); \ + local_irq_disable(); \ +}) +#define local_irq_restore(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( \ + "msr cpsr_c, %0 @ local_irq_restore\n" \ + : \ + : "r" (x) \ + : "memory", "cc"); \ +}) + +static inline int local_irq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_IRQ_MASK); +} + +#define local_fiq_enable() __asm__("cpsie f @ __stf\n" : : : "memory", "cc") +#define local_fiq_disable() __asm__("cpsid f @ __clf\n" : : : "memory", "cc") + +#define local_abort_enable() __asm__("cpsie a @ __sta\n" : : : "memory", "cc") +#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc") + +static inline int local_fiq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_FIQ_MASK); +} + +#define CSDB ".inst 0xe320f014" + +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( "cmp %1, %2\n" + "sbc %0, %1, %1\n" + CSDB + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc" ); + + return mask; +} +#define array_index_mask_nospec array_index_mask_nospec + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm32/traps.h b/xen/arch/arm/include/asm/arm32/traps.h new file mode 100644 index 0000000000..e3c4a8b473 --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/traps.h @@ -0,0 +1,13 @@ +#ifndef __ASM_ARM32_TRAPS__ +#define __ASM_ARM32_TRAPS__ + +#endif /* __ASM_ARM32_TRAPS__ */ +/* + * Local variables: + * mode: C + * 
c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ + diff --git a/xen/arch/arm/include/asm/arm32/vfp.h b/xen/arch/arm/include/asm/arm32/vfp.h new file mode 100644 index 0000000000..bade3bc66e --- /dev/null +++ b/xen/arch/arm/include/asm/arm32/vfp.h @@ -0,0 +1,41 @@ +#ifndef _ARM_ARM32_VFP_H +#define _ARM_ARM32_VFP_H + +#define FPEXC_EX (1u << 31) +#define FPEXC_EN (1u << 30) +#define FPEXC_FP2V (1u << 28) + +#define MVFR0_A_SIMD_MASK (0xf << 0) + + +#define FPSID_IMPLEMENTER_BIT (24) +#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT) +#define FPSID_ARCH_BIT (16) +#define FPSID_ARCH_MASK (0xf << FPSID_ARCH_BIT) +#define FPSID_PART_BIT (8) +#define FPSID_PART_MASK (0xff << FPSID_PART_BIT) +#define FPSID_VARIANT_BIT (4) +#define FPSID_VARIANT_MASK (0xf << FPSID_VARIANT_BIT) +#define FPSID_REV_BIT (0) +#define FPSID_REV_MASK (0xf << FPSID_REV_BIT) + +struct vfp_state +{ + uint64_t fpregs1[16]; /* {d0-d15} */ + uint64_t fpregs2[16]; /* {d16-d31} */ + uint32_t fpexc; + uint32_t fpscr; + /* VFP implementation specific state */ + uint32_t fpinst; + uint32_t fpinst2; +}; + +#endif /* _ARM_ARM32_VFP_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/atomic.h b/xen/arch/arm/include/asm/arm64/atomic.h new file mode 100644 index 0000000000..2d42567866 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/atomic.h @@ -0,0 +1,148 @@ +/* + * Based on arch/arm64/include/asm/atomic.h + * which in turn is + * Based on arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ARCH_ARM_ARM64_ATOMIC +#define __ARCH_ARM_ARM64_ATOMIC + +/* + * AArch64 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add_return\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i)); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub_return\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "memory"); + + smp_mb(); + return result; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_and\n" +"1: ldxr %w0, %2\n" +" and %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (m)); +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long tmp; + int oldval; + + smp_mb(); + + asm volatile("// atomic_cmpxchg\n" +"1: ldxr %w1, %2\n" +" cmp %w1, %w3\n" +" b.ne 2f\n" +" stxr %w0, %w4, %2\n" +" cbnz %w0, 1b\n" +"2:" + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc"); + + smp_mb(); + return oldval; +} + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c; +} + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/bitops.h b/xen/arch/arm/include/asm/arm64/bitops.h new file mode 100644 index 0000000000..d85a49bca4 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/bitops.h @@ -0,0 +1,98 @@ +#ifndef _ARM_ARM64_BITOPS_H +#define _ARM_ARM64_BITOPS_H + +/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */ +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static /*__*/always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +/* Based on linux/include/asm-generic/bitops/ffz.h */ +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
+ */ +#define ffz(x) __ffs(~(x)) + +static inline int flsl(unsigned long x) +{ + uint64_t ret; + + if (__builtin_constant_p(x)) + return generic_flsl(x); + + asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); + + return BITS_PER_LONG - ret; +} + +/* Based on linux/include/asm-generic/bitops/find.h */ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + + +#endif /* _ARM_ARM64_BITOPS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/brk.h b/xen/arch/arm/include/asm/arm64/brk.h new file mode 100644 index 0000000000..04442c4b9f --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/brk.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARM_ARM64_BRK +#define __ASM_ARM_ARM64_BRK + +/* + * #imm16 values used for BRK instruction generation + * 0x001: xen-mode BUG() and WARN() traps + * 0x002: for triggering a fault on purpose (reserved) + */ +#define BRK_BUG_FRAME_IMM 1 +#define BRK_FAULT_IMM 2 + +/* + * BRK instruction encoding + * The #imm16 value should be placed at bits[20:5] within BRK ins + */ +#define AARCH64_BREAK_MON 0xd4200000 + +/* + * BRK instruction for provoking a fault on purpose + */ +#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (BRK_FAULT_IMM << 5)) + +#endif /* !__ASM_ARM_ARM64_BRK */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/bug.h b/xen/arch/arm/include/asm/arm64/bug.h new file mode 100644 index 0000000000..5e11c0dfd5 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/bug.h @@ -0,0 +1,11 @@ +#ifndef __ARM_ARM64_BUG_H__ +#define __ARM_ARM64_BUG_H__ + +#include +#include + +#define BUG_INSTR "brk " __stringify(BRK_BUG_FRAME_IMM) + +#define BUG_FN_REG x0 + +#endif /* __ARM_ARM64_BUG_H__ */ diff --git a/xen/arch/arm/include/asm/arm64/cmpxchg.h b/xen/arch/arm/include/asm/arm64/cmpxchg.h new file mode 100644 index 0000000000..10e4edc022 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/cmpxchg.h @@ -0,0 +1,183 @@ +#ifndef __ASM_ARM64_CMPXCHG_H +#define __ASM_ARM64_CMPXCHG_H + +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret, tmp; + + switch (size) { + case 1: + asm volatile("// __xchg1\n" + "1: ldxrb %w0, %2\n" + " stlxrb %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) + : "r" (x) + : "memory"); + break; + case 2: + asm volatile("// __xchg2\n" + "1: ldxrh %w0, %2\n" + " stlxrh %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) + : "r" (x) + : "memory"); + break; + case 4: + asm volatile("// __xchg4\n" + "1: ldxr %w0, %2\n" + " stlxr %w1, %w3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) + : "r" (x) + : "memory"); + break; + case 8: + asm volatile("// __xchg8\n" + "1: ldxr %0, %2\n" + " stlxr %w1, %3, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) + : "r" (x) + : "memory"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + + smp_mb(); + return ret; +} + +#define xchg(ptr,x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) + +extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); + +#define __CMPXCHG_CASE(w, sz, name) \ +static inline bool __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long *old, \ + unsigned long new, \ + bool timeout, \ + unsigned int max_try) \ +{ \ + unsigned long oldval; \ + unsigned long res; \ + \ + do { \ + asm volatile("// __cmpxchg_case_" #name "\n" \ + " ldxr" #sz " %" #w "1, %2\n" \ + " mov %w0, #0\n" \ + " cmp %" #w "1, %" #w "3\n" \ + " b.ne 1f\n" \ + " stxr" #sz " %w0, %" #w "4, %2\n" \ + "1:\n" \ + : "=&r" (res), "=&r" (oldval), \ + "+Q" (*(unsigned long *)ptr) \ + : "Ir" (*old), "r" (new) \ + : "cc"); \ + \ + if (!res) \ + break; \ + } while (!timeout || ((--max_try) > 0)); \ + \ + *old = oldval; \ + \ + return !res; \ +} + +__CMPXCHG_CASE(w, b, 1) +__CMPXCHG_CASE(w, h, 2) +__CMPXCHG_CASE(w, , 4) +__CMPXCHG_CASE( , , 8) + +static always_inline bool __int_cmpxchg(volatile 
void *ptr, unsigned long *old, + unsigned long new, int size, + bool timeout, unsigned int max_try) +{ + switch (size) { + case 1: + return __cmpxchg_case_1(ptr, old, new, timeout, max_try); + case 2: + return __cmpxchg_case_2(ptr, old, new, timeout, max_try); + case 4: + return __cmpxchg_case_4(ptr, old, new, timeout, max_try); + case 8: + return __cmpxchg_case_8(ptr, old, new, timeout, max_try); + default: + return __bad_cmpxchg(ptr, size); + } + + ASSERT_UNREACHABLE(); +} + +static always_inline unsigned long __cmpxchg(volatile void *ptr, + unsigned long old, + unsigned long new, + int size) +{ + smp_mb(); + if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) + ASSERT_UNREACHABLE(); + smp_mb(); + + return old; +} + +/* + * The helper may fail to update the memory if the action takes too long. + * + * @old: On call the value pointed contains the expected old value. It will be + * updated to the actual old value. + * @max_try: Maximum number of iterations + * + * The helper will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. + */ +static always_inline bool __cmpxchg_timeout(volatile void *ptr, + unsigned long *old, + unsigned long new, + int size, + unsigned int max_try) +{ + bool ret; + + smp_mb(); + ret = __int_cmpxchg(ptr, old, new, size, true, max_try); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \ + sizeof(*(ptr))); \ + __ret; \ +}) + +#define cmpxchg64(ptr, o, n) cmpxchg(ptr, o, n) + +#define __cmpxchg64_timeout(ptr, old, new, max_try) \ + __cmpxchg_timeout(ptr, old, new, 8, max_try) + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/cpufeature.h b/xen/arch/arm/include/asm/arm64/cpufeature.h new file mode 100644 index 0000000000..d9b9fa77cb --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/cpufeature.h @@ -0,0 +1,104 @@ +#ifndef __ASM_ARM_ARM64_CPUFEATURES_H +#define __ASM_ARM_ARM64_CPUFEATURES_H + +/* + * CPU feature register tracking + * + * The safe value of a CPUID feature field is dependent on the implications + * of the values assigned to it by the architecture. Based on the relationship + * between the values, the features are classified into 3 types - LOWER_SAFE, + * HIGHER_SAFE and EXACT. + * + * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest + * for HIGHER_SAFE. It is expected that all CPUs have the same value for + * a field when EXACT is specified, failing which, the safe value specified + * in the table is chosen. + */ + +enum ftr_type { + FTR_EXACT, /* Use a predefined safe value */ + FTR_LOWER_SAFE, /* Smaller value is safe */ + FTR_HIGHER_SAFE, /* Bigger value is safe */ + FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */ +}; + +#define FTR_STRICT true /* SANITY check strict matching required */ +#define FTR_NONSTRICT false /* SANITY check ignored */ + +#define FTR_SIGNED true /* Value should be treated as signed */ +#define FTR_UNSIGNED false /* Value should be treated as unsigned */ + +#define FTR_VISIBLE true /* Feature visible to the user space */ +#define FTR_HIDDEN false /* Feature is hidden from the user */ + +#define FTR_VISIBLE_IF_IS_ENABLED(config) \ + (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN) + +struct arm64_ftr_bits { + bool sign; /* Value is signed ? 
*/ + bool visible; + bool strict; /* CPU Sanity check: strict matching required ? */ + enum ftr_type type; + u8 shift; + u8 width; + s64 safe_val; /* safe value for FTR_EXACT features */ +}; + +static inline int __attribute_const__ +cpuid_feature_extract_signed_field_width(u64 features, int field, int width) +{ + return (s64)(features << (64 - width - field)) >> (64 - width); +} + +static inline int __attribute_const__ +cpuid_feature_extract_signed_field(u64 features, int field) +{ + return cpuid_feature_extract_signed_field_width(features, field, 4); +} + +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) +{ + return (u64)(features << (64 - width - field)) >> (64 - width); +} + +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field(u64 features, int field) +{ + return cpuid_feature_extract_unsigned_field_width(features, field, 4); +} + +static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) +{ + return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); +} + +static inline int __attribute_const__ +cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign) +{ + return (sign) ? + cpuid_feature_extract_signed_field_width(features, field, width) : + cpuid_feature_extract_unsigned_field_width(features, field, width); +} + +static inline int __attribute_const__ +cpuid_feature_extract_field(u64 features, int field, bool sign) +{ + return cpuid_feature_extract_field_width(features, field, 4, sign); +} + +static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) +{ + return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign); +} + +#endif /* _ASM_ARM_ARM64_CPUFEATURES_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/efibind.h b/xen/arch/arm/include/asm/arm64/efibind.h new file mode 100644 index 0000000000..2b0bf40bf2 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/efibind.h @@ -0,0 +1,216 @@ +/*++ + +Copyright (c) 1998 Intel Corporation + +Module Name: + + efefind.h + +Abstract: + + EFI to compile bindings + + + + +Revision History + +--*/ + +#ifndef __GNUC__ +#pragma pack() +#endif + +#define EFIERR(a) (0x8000000000000000 | a) +#define EFI_ERROR_MASK 0x8000000000000000 +#define EFIERR_OEM(a) (0xc000000000000000 | a) + +#define BAD_POINTER 0xFBFBFBFBFBFBFBFB +#define MAX_ADDRESS 0xFFFFFFFFFFFFFFFF + +#define EFI_STUB_ERROR MAX_ADDRESS + +#ifndef __ASSEMBLY__ +// +// Basic int types of various widths +// + +#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L ) + + // No ANSI C 1999/2000 stdint.h integer width declarations + + #if defined(__GNUC__) + typedef unsigned long long uint64_t __attribute__((aligned (8))); + typedef long long int64_t __attribute__((aligned (8))); + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #elif defined(UNIX_LP64) + + /* Use LP64 programming model from C_FLAGS for integer width declarations */ + + typedef unsigned long uint64_t; + typedef long int64_t; + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #else + + /* Assume P64 programming model from C_FLAGS for integer width declarations */ + + 
typedef unsigned long long uint64_t __attribute__((aligned (8)));
+    typedef long long int64_t __attribute__((aligned (8)));
+    typedef unsigned int uint32_t;
+    typedef int int32_t;
+    typedef unsigned short uint16_t;
+    typedef short int16_t;
+    typedef unsigned char uint8_t;
+    typedef char int8_t;
+    #endif
+#endif
+
+//
+// Basic EFI types of various widths
+//
+
+#ifndef __WCHAR_TYPE__
+# define __WCHAR_TYPE__ short
+#endif
+
+typedef uint64_t UINT64;
+typedef int64_t INT64;
+
+#ifndef _BASETSD_H_
+    typedef uint32_t UINT32;
+    typedef int32_t INT32;
+#endif
+
+typedef uint16_t UINT16;
+typedef int16_t INT16;
+typedef uint8_t UINT8;
+typedef int8_t INT8;
+typedef __WCHAR_TYPE__ WCHAR;
+
+#undef VOID
+#define VOID void
+
+
+typedef int64_t INTN;
+typedef uint64_t UINTN;
+
+#define POST_CODE(_Data)
+
+
+#define BREAKPOINT() while (TRUE); // Make it hang on Bios[Dbg]32
+
+//
+// Pointers must be aligned to these addresses to function
+//
+
+#define MIN_ALIGNMENT_SIZE 4
+
+#define ALIGN_VARIABLE(Value ,Adjustment) \
+    (UINTN)Adjustment = 0; \
+    if((UINTN)Value % MIN_ALIGNMENT_SIZE) \
+        (UINTN)Adjustment = MIN_ALIGNMENT_SIZE - ((UINTN)Value % MIN_ALIGNMENT_SIZE); \
+    Value = (UINTN)Value + (UINTN)Adjustment
+
+
+//
+// Define macros to build data structure signatures from characters.
+//
+
+#define EFI_SIGNATURE_16(A,B) ((A) | (B<<8))
+#define EFI_SIGNATURE_32(A,B,C,D) (EFI_SIGNATURE_16(A,B) | (EFI_SIGNATURE_16(C,D) << 16))
+#define EFI_SIGNATURE_64(A,B,C,D,E,F,G,H) (EFI_SIGNATURE_32(A,B,C,D) | ((UINT64)(EFI_SIGNATURE_32(E,F,G,H)) << 32))
+
+#define EXPORTAPI
+
+
+//
+// EFIAPI - prototype calling convention for EFI function pointers
+// BOOTSERVICE - prototype for implementation of a boot service interface
+// RUNTIMESERVICE - prototype for implementation of a runtime service interface
+// RUNTIMEFUNCTION - prototype for implementation of a runtime function that is not a service
+// RUNTIME_CODE - pragma macro for declaring runtime code
+//
+
+#ifndef EFIAPI // Forces EFI calling conventions regardless of compiler options
+    #define EFIAPI // Substitute expression to force C calling convention
+#endif
+
+#define BOOTSERVICE
+//#define RUNTIMESERVICE(proto,a) alloc_text("rtcode",a); proto a
+//#define RUNTIMEFUNCTION(proto,a) alloc_text("rtcode",a); proto a
+#define RUNTIMESERVICE
+#define RUNTIMEFUNCTION
+
+
+#define RUNTIME_CODE(a) alloc_text("rtcode", a)
+#define BEGIN_RUNTIME_DATA() data_seg("rtdata")
+#define END_RUNTIME_DATA() data_seg("")
+
+#define VOLATILE volatile
+
+#define MEMORY_FENCE()
+
+
+//
+// When built similar to FW, link everything together as
+// one big module.
+//
+
+#define EFI_DRIVER_ENTRY_POINT(InitFunction) \
+    UINTN \
+    InitializeDriver ( \
+        VOID *ImageHandle, \
+        VOID *SystemTable \
+        ) \
+    { \
+        return InitFunction(ImageHandle, \
+                SystemTable); \
+    } \
+    \
+    EFI_STATUS efi_main( \
+        EFI_HANDLE image, \
+        EFI_SYSTEM_TABLE *systab \
+        ) __attribute__((weak, \
+            alias ("InitializeDriver")));
+
+#define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \
+    (_if)->LoadInternal(type, name, entry)
+
+
+//
+// Some compilers don't support the forward reference construct:
+//  typedef struct XXXXX
+//
+// The following macro provides a workaround for such cases.
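+//
+// For instance (illustrative): under GCC, INTERFACE_DECL(_EFI_DEVICE_PATH);
+// expands to "struct _EFI_DEVICE_PATH;", i.e. a plain forward declaration.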
+//
+#ifdef NO_INTERFACE_DECL
+#define INTERFACE_DECL(x)
+#else
+#ifdef __GNUC__
+#define INTERFACE_DECL(x) struct x
+#else
+#define INTERFACE_DECL(x) typedef struct x
+#endif
+#endif
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/arm64/flushtlb.h b/xen/arch/arm/include/asm/arm64/flushtlb.h
new file mode 100644
index 0000000000..7c54315187
--- /dev/null
+++ b/xen/arch/arm/include/asm/arm64/flushtlb.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__
+#define __ASM_ARM_ARM64_FLUSHTLB_H__
+
+/*
+ * Every invalidation operation uses the following pattern:
+ *
+ * DSB ISHST        // Ensure prior page-table updates have completed
+ * TLBI...          // Invalidate the TLB
+ * DSB ISH          // Ensure the TLB invalidation has completed
+ * ISB              // See explanation below
+ *
+ * ARM64_WORKAROUND_REPEAT_TLBI:
+ * Modification of the translation table for a virtual address might lead to
+ * a read-after-read ordering violation.
+ * The workaround repeats the TLBI+DSB sequence for all TLB flush
+ * operations. While this is strictly not necessary, we don't want to take
+ * any risk.
+ *
+ * For Xen page-tables the ISB will discard any instructions fetched
+ * from the old mappings.
+ *
+ * For the Stage-2 page-tables the ISB ensures the completion of the DSB
+ * (and therefore the TLB invalidation) before continuing. So we know
+ * the TLBs cannot contain an entry for a mapping we may have removed.
+ */
+#define TLB_HELPER(name, tlbop)                  \
+static inline void name(void)                    \
+{                                                \
+    asm volatile(                                \
+        "dsb  ishst;"                            \
+        "tlbi "  # tlbop  ";"                    \
+        ALTERNATIVE(                             \
+            "nop; nop;",                         \
+            "dsb  ish;"                          \
+            "tlbi "  # tlbop  ";",               \
+            ARM64_WORKAROUND_REPEAT_TLBI,        \
+            CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
+        "dsb  ish;"                              \
+        "isb;"                                   \
+        : : : "memory");                         \
+}
+
+/* Flush local TLBs, current VMID only. */
+TLB_HELPER(flush_guest_tlb_local, vmalls12e1);
+
+/* Flush inner-shareable TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb, vmalls12e1is);
+
+/* Flush local TLBs, all VMIDs, non-hypervisor mode */
+TLB_HELPER(flush_all_guests_tlb_local, alle1);
+
+/* Flush inner-shareable TLBs, all VMIDs, non-hypervisor mode */
+TLB_HELPER(flush_all_guests_tlb, alle1is);
+
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+TLB_HELPER(flush_xen_tlb_local, alle2);
+
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
+{
+    asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
+}
+
+/* Flush TLB of all processors in the inner-shareable domain for address va.
*/ +static inline void __flush_xen_tlb_one(vaddr_t va) +{ + asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory"); +} + +#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/hsr.h b/xen/arch/arm/include/asm/arm64/hsr.h new file mode 100644 index 0000000000..e691d41c17 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/hsr.h @@ -0,0 +1,159 @@ +#ifndef __ASM_ARM_ARM64_HSR_H +#define __ASM_ARM_ARM64_HSR_H + +/* AArch 64 System Register Encodings */ +#define __HSR_SYSREG_c0 0 +#define __HSR_SYSREG_c1 1 +#define __HSR_SYSREG_c2 2 +#define __HSR_SYSREG_c3 3 +#define __HSR_SYSREG_c4 4 +#define __HSR_SYSREG_c5 5 +#define __HSR_SYSREG_c6 6 +#define __HSR_SYSREG_c7 7 +#define __HSR_SYSREG_c8 8 +#define __HSR_SYSREG_c9 9 +#define __HSR_SYSREG_c10 10 +#define __HSR_SYSREG_c11 11 +#define __HSR_SYSREG_c12 12 +#define __HSR_SYSREG_c13 13 +#define __HSR_SYSREG_c14 14 +#define __HSR_SYSREG_c15 15 + +#define __HSR_SYSREG_0 0 +#define __HSR_SYSREG_1 1 +#define __HSR_SYSREG_2 2 +#define __HSR_SYSREG_3 3 +#define __HSR_SYSREG_4 4 +#define __HSR_SYSREG_5 5 +#define __HSR_SYSREG_6 6 +#define __HSR_SYSREG_7 7 + +/* These are used to decode traps with HSR.EC==HSR_EC_SYSREG */ +#define HSR_SYSREG(op0,op1,crn,crm,op2) \ + (((__HSR_SYSREG_##op0) << HSR_SYSREG_OP0_SHIFT) | \ + ((__HSR_SYSREG_##op1) << HSR_SYSREG_OP1_SHIFT) | \ + ((__HSR_SYSREG_##crn) << HSR_SYSREG_CRN_SHIFT) | \ + ((__HSR_SYSREG_##crm) << HSR_SYSREG_CRM_SHIFT) | \ + ((__HSR_SYSREG_##op2) << HSR_SYSREG_OP2_SHIFT)) + +#define HSR_SYSREG_DCISW HSR_SYSREG(1,0,c7,c6,2) +#define HSR_SYSREG_DCCSW HSR_SYSREG(1,0,c7,c10,2) +#define HSR_SYSREG_DCCISW HSR_SYSREG(1,0,c7,c14,2) + +#define HSR_SYSREG_MDSCR_EL1 HSR_SYSREG(2,0,c0,c2,2) +#define HSR_SYSREG_MDRAR_EL1 HSR_SYSREG(2,0,c1,c0,0) +#define HSR_SYSREG_OSLAR_EL1 HSR_SYSREG(2,0,c1,c0,4) +#define HSR_SYSREG_OSLSR_EL1 HSR_SYSREG(2,0,c1,c1,4) +#define HSR_SYSREG_OSDLR_EL1 HSR_SYSREG(2,0,c1,c3,4) +#define HSR_SYSREG_DBGPRCR_EL1 HSR_SYSREG(2,0,c1,c4,4) +#define HSR_SYSREG_MDCCSR_EL0 HSR_SYSREG(2,3,c0,c1,0) + +#define HSR_SYSREG_DBGBVRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,4) +#define HSR_SYSREG_DBGBCRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,5) +#define HSR_SYSREG_DBGWVRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,6) +#define HSR_SYSREG_DBGWCRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,7) + +#define HSR_SYSREG_DBG_CASES(REG) case HSR_SYSREG_##REG##n_EL1(0): \ + case HSR_SYSREG_##REG##n_EL1(1): \ + case HSR_SYSREG_##REG##n_EL1(2): \ + case HSR_SYSREG_##REG##n_EL1(3): \ + case HSR_SYSREG_##REG##n_EL1(4): \ + case HSR_SYSREG_##REG##n_EL1(5): \ + case HSR_SYSREG_##REG##n_EL1(6): \ + case HSR_SYSREG_##REG##n_EL1(7): \ + case HSR_SYSREG_##REG##n_EL1(8): \ + case HSR_SYSREG_##REG##n_EL1(9): \ + case HSR_SYSREG_##REG##n_EL1(10): \ + case HSR_SYSREG_##REG##n_EL1(11): \ + case HSR_SYSREG_##REG##n_EL1(12): \ + case HSR_SYSREG_##REG##n_EL1(13): \ + case HSR_SYSREG_##REG##n_EL1(14): \ + case HSR_SYSREG_##REG##n_EL1(15) + +#define HSR_SYSREG_SCTLR_EL1 HSR_SYSREG(3,0,c1, c0,0) +#define HSR_SYSREG_ACTLR_EL1 HSR_SYSREG(3,0,c1, c0,1) +#define HSR_SYSREG_TTBR0_EL1 HSR_SYSREG(3,0,c2, c0,0) +#define HSR_SYSREG_TTBR1_EL1 HSR_SYSREG(3,0,c2, c0,1) +#define HSR_SYSREG_TCR_EL1 HSR_SYSREG(3,0,c2, c0,2) +#define HSR_SYSREG_AFSR0_EL1 HSR_SYSREG(3,0,c5, c1,0) +#define HSR_SYSREG_AFSR1_EL1 HSR_SYSREG(3,0,c5, c1,1) +#define HSR_SYSREG_ESR_EL1 HSR_SYSREG(3,0,c5, c2,0) +#define HSR_SYSREG_FAR_EL1 HSR_SYSREG(3,0,c6, c0,0) 
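+
+/*
+ * Illustrative use (a sketch): the EL1 sysreg trap handler typically
+ * switches on the encoding extracted from the HSR, along the lines of
+ *
+ *     switch ( hsr.bits & HSR_SYSREG_REGS_MASK )  // mask defined elsewhere
+ *     {
+ *     case HSR_SYSREG_SCTLR_EL1:
+ *         ...                                     // emulate or forward
+ *     }
+ */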
+#define HSR_SYSREG_PMINTENSET_EL1 HSR_SYSREG(3,0,c9,c14,1) +#define HSR_SYSREG_PMINTENCLR_EL1 HSR_SYSREG(3,0,c9,c14,2) +#define HSR_SYSREG_MAIR_EL1 HSR_SYSREG(3,0,c10,c2,0) +#define HSR_SYSREG_AMAIR_EL1 HSR_SYSREG(3,0,c10,c3,0) +#define HSR_SYSREG_ICC_SGI1R_EL1 HSR_SYSREG(3,0,c12,c11,5) +#define HSR_SYSREG_ICC_ASGI1R_EL1 HSR_SYSREG(3,1,c12,c11,6) +#define HSR_SYSREG_ICC_SGI0R_EL1 HSR_SYSREG(3,2,c12,c11,7) +#define HSR_SYSREG_ICC_SRE_EL1 HSR_SYSREG(3,0,c12,c12,5) +#define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1) + +#define HSR_SYSREG_PMCR_EL0 HSR_SYSREG(3,3,c9,c12,0) +#define HSR_SYSREG_PMCNTENSET_EL0 HSR_SYSREG(3,3,c9,c12,1) +#define HSR_SYSREG_PMCNTENCLR_EL0 HSR_SYSREG(3,3,c9,c12,2) +#define HSR_SYSREG_PMOVSCLR_EL0 HSR_SYSREG(3,3,c9,c12,3) +#define HSR_SYSREG_PMSWINC_EL0 HSR_SYSREG(3,3,c9,c12,4) +#define HSR_SYSREG_PMSELR_EL0 HSR_SYSREG(3,3,c9,c12,5) +#define HSR_SYSREG_PMCEID0_EL0 HSR_SYSREG(3,3,c9,c12,6) +#define HSR_SYSREG_PMCEID1_EL0 HSR_SYSREG(3,3,c9,c12,7) + +#define HSR_SYSREG_PMCCNTR_EL0 HSR_SYSREG(3,3,c9,c13,0) +#define HSR_SYSREG_PMXEVTYPER_EL0 HSR_SYSREG(3,3,c9,c13,1) +#define HSR_SYSREG_PMXEVCNTR_EL0 HSR_SYSREG(3,3,c9,c13,2) + +#define HSR_SYSREG_PMUSERENR_EL0 HSR_SYSREG(3,3,c9,c14,0) +#define HSR_SYSREG_PMOVSSET_EL0 HSR_SYSREG(3,3,c9,c14,3) + +#define HSR_SYSREG_CNTPCT_EL0 HSR_SYSREG(3,3,c14,c0,0) +#define HSR_SYSREG_CNTP_TVAL_EL0 HSR_SYSREG(3,3,c14,c2,0) +#define HSR_SYSREG_CNTP_CTL_EL0 HSR_SYSREG(3,3,c14,c2,1) +#define HSR_SYSREG_CNTP_CVAL_EL0 HSR_SYSREG(3,3,c14,c2,2) + +/* Those registers are used when HCR_EL2.TID3 is set */ +#define HSR_SYSREG_ID_PFR0_EL1 HSR_SYSREG(3,0,c0,c1,0) +#define HSR_SYSREG_ID_PFR1_EL1 HSR_SYSREG(3,0,c0,c1,1) +#define HSR_SYSREG_ID_PFR2_EL1 HSR_SYSREG(3,0,c0,c3,4) +#define HSR_SYSREG_ID_DFR0_EL1 HSR_SYSREG(3,0,c0,c1,2) +#define HSR_SYSREG_ID_DFR1_EL1 HSR_SYSREG(3,0,c0,c3,5) +#define HSR_SYSREG_ID_AFR0_EL1 HSR_SYSREG(3,0,c0,c1,3) +#define HSR_SYSREG_ID_MMFR0_EL1 HSR_SYSREG(3,0,c0,c1,4) +#define HSR_SYSREG_ID_MMFR1_EL1 HSR_SYSREG(3,0,c0,c1,5) +#define HSR_SYSREG_ID_MMFR2_EL1 HSR_SYSREG(3,0,c0,c1,6) +#define HSR_SYSREG_ID_MMFR3_EL1 HSR_SYSREG(3,0,c0,c1,7) +#define HSR_SYSREG_ID_MMFR4_EL1 HSR_SYSREG(3,0,c0,c2,6) +#define HSR_SYSREG_ID_MMFR5_EL1 HSR_SYSREG(3,0,c0,c3,6) +#define HSR_SYSREG_ID_ISAR0_EL1 HSR_SYSREG(3,0,c0,c2,0) +#define HSR_SYSREG_ID_ISAR1_EL1 HSR_SYSREG(3,0,c0,c2,1) +#define HSR_SYSREG_ID_ISAR2_EL1 HSR_SYSREG(3,0,c0,c2,2) +#define HSR_SYSREG_ID_ISAR3_EL1 HSR_SYSREG(3,0,c0,c2,3) +#define HSR_SYSREG_ID_ISAR4_EL1 HSR_SYSREG(3,0,c0,c2,4) +#define HSR_SYSREG_ID_ISAR5_EL1 HSR_SYSREG(3,0,c0,c2,5) +#define HSR_SYSREG_ID_ISAR6_EL1 HSR_SYSREG(3,0,c0,c2,7) +#define HSR_SYSREG_MVFR0_EL1 HSR_SYSREG(3,0,c0,c3,0) +#define HSR_SYSREG_MVFR1_EL1 HSR_SYSREG(3,0,c0,c3,1) +#define HSR_SYSREG_MVFR2_EL1 HSR_SYSREG(3,0,c0,c3,2) + +#define HSR_SYSREG_ID_AA64PFR0_EL1 HSR_SYSREG(3,0,c0,c4,0) +#define HSR_SYSREG_ID_AA64PFR1_EL1 HSR_SYSREG(3,0,c0,c4,1) +#define HSR_SYSREG_ID_AA64DFR0_EL1 HSR_SYSREG(3,0,c0,c5,0) +#define HSR_SYSREG_ID_AA64DFR1_EL1 HSR_SYSREG(3,0,c0,c5,1) +#define HSR_SYSREG_ID_AA64ISAR0_EL1 HSR_SYSREG(3,0,c0,c6,0) +#define HSR_SYSREG_ID_AA64ISAR1_EL1 HSR_SYSREG(3,0,c0,c6,1) +#define HSR_SYSREG_ID_AA64MMFR0_EL1 HSR_SYSREG(3,0,c0,c7,0) +#define HSR_SYSREG_ID_AA64MMFR1_EL1 HSR_SYSREG(3,0,c0,c7,1) +#define HSR_SYSREG_ID_AA64MMFR2_EL1 HSR_SYSREG(3,0,c0,c7,2) +#define HSR_SYSREG_ID_AA64AFR0_EL1 HSR_SYSREG(3,0,c0,c5,4) +#define HSR_SYSREG_ID_AA64AFR1_EL1 HSR_SYSREG(3,0,c0,c5,5) +#define HSR_SYSREG_ID_AA64ZFR0_EL1 HSR_SYSREG(3,0,c0,c4,4) + +#endif 
/* __ASM_ARM_ARM64_HSR_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/insn.h b/xen/arch/arm/include/asm/arm64/insn.h new file mode 100644 index 0000000000..4e0d364d41 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/insn.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu + * + * Copyright (C) 2014 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ARCH_ARM_ARM64_INSN +#define __ARCH_ARM_ARM64_INSN + +#include +#include + +enum aarch64_insn_hint_op { + AARCH64_INSN_HINT_NOP = 0x0 << 5, + AARCH64_INSN_HINT_YIELD = 0x1 << 5, + AARCH64_INSN_HINT_WFE = 0x2 << 5, + AARCH64_INSN_HINT_WFI = 0x3 << 5, + AARCH64_INSN_HINT_SEV = 0x4 << 5, + AARCH64_INSN_HINT_SEVL = 0x5 << 5, +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR, + AARCH64_INSN_IMM_26, + AARCH64_INSN_IMM_19, + AARCH64_INSN_IMM_16, + AARCH64_INSN_IMM_14, + AARCH64_INSN_IMM_12, + AARCH64_INSN_IMM_9, + AARCH64_INSN_IMM_7, + AARCH64_INSN_IMM_6, + AARCH64_INSN_IMM_S, + AARCH64_INSN_IMM_R, + AARCH64_INSN_IMM_MAX +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK, + AARCH64_INSN_BRANCH_LINK, + AARCH64_INSN_BRANCH_RETURN, + AARCH64_INSN_BRANCH_COMP_ZERO, + AARCH64_INSN_BRANCH_COMP_NONZERO, +}; + +#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ +static always_inline bool aarch64_insn_is_##abbr(u32 code) \ +{ return (code & (mask)) == (val); } \ +static always_inline u32 aarch64_insn_get_##abbr##_value(void) \ +{ return (val); } + +__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) +__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) +__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) +__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) +__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) +__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) +__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) +__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) + +bool aarch64_insn_is_branch_imm(u32 insn); + +u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); +u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm); + +s32 aarch64_get_branch_offset(u32 insn); +u32 aarch64_set_branch_offset(u32 insn, s32 offset); + +u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); +u32 aarch64_insn_gen_nop(void); + +/* Wrapper for common code */ +static inline bool insn_is_branch_imm(u32 insn) +{ + return aarch64_insn_is_branch_imm(insn); +} + +static inline s32 insn_get_branch_offset(u32 insn) +{ + return aarch64_get_branch_offset(insn); +} + +static inline u32 insn_set_branch_offset(u32 insn, s32 offset) +{ + return aarch64_set_branch_offset(insn, offset); +} + +#endif /* !__ARCH_ARM_ARM64_INSN */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * 
End: + */ diff --git a/xen/arch/arm/include/asm/arm64/io.h b/xen/arch/arm/include/asm/arm64/io.h new file mode 100644 index 0000000000..30bfc78d9e --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/io.h @@ -0,0 +1,148 @@ +/* + * Based on linux arch/arm64/include/asm/io.h which is in turn + * Based on arch/arm/include/asm/io.h + * + * Copyright (C) 1996-2000 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ARM_ARM64_IO_H +#define _ARM_ARM64_IO_H + +#include +#include +#include + +/* + * Generic IO read/write. These perform native-endian accesses. + */ +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile(ALTERNATIVE("ldrb %w0, [%1]", + "ldarb %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); + return val; +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile(ALTERNATIVE("ldrh %w0, [%1]", + "ldarh %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); + return val; +} + +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile(ALTERNATIVE("ldr %w0, [%1]", + "ldar %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); + return val; +} + +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + asm volatile(ALTERNATIVE("ldr %0, [%1]", + "ldar %0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ +#define __iormb() rmb() +#define __iowmb() wmb() + +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. 
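+ *
+ * Sketch of intended use (illustrative): draining a FIFO with the relaxed
+ * forms avoids one barrier per access, e.g.
+ *
+ *     while ( count-- )
+ *         *buf++ = readl_relaxed(fifo);   // hypothetical MMIO pointer
+ *     status = readl(status_reg);         // ordered access once it matters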
+ */ +#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) +#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; }) + +#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) +#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) +#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) + +/* + * Emulate x86 io ports for ARM. + */ +static inline int emulate_read(u64 addr) +{ + printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr); + return 0; +} + +static inline void emulate_write(u64 addr) +{ + printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr); +} + +#define inb(c) ( emulate_read(c) ) +#define inw(c) ( emulate_read(c) ) +#define inl(c) ( emulate_read(c) ) + +#define outb(v, c) ( emulate_write(c) ) +#define outw(v, c) ( emulate_write(c) ) +#define outl(v, c) ( emulate_write(c) ) + +#endif /* _ARM_ARM64_IO_H */ diff --git a/xen/arch/arm/include/asm/arm64/macros.h b/xen/arch/arm/include/asm/arm64/macros.h new file mode 100644 index 0000000000..5ad66efd6b --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/macros.h @@ -0,0 +1,36 @@ +#ifndef __ASM_ARM_ARM64_MACROS_H +#define __ASM_ARM_ARM64_MACROS_H + + /* + * @dst: Result of get_cpu_info() + */ + .macro adr_cpu_info, dst + add \dst, sp, #STACK_SIZE + and \dst, \dst, #~(STACK_SIZE - 1) + sub \dst, \dst, #CPUINFO_sizeof + .endm + + /* + * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id())) + * @sym: The name of the per-cpu variable + * @tmp: scratch register + */ + .macro ldr_this_cpu, dst, sym, tmp + ldr \dst, =per_cpu__\sym + mrs \tmp, tpidr_el2 + ldr \dst, [\dst, \tmp] + .endm + + .macro ret + /* ret opcode */ + .inst 0xd65f03c0 + sb + .endm + +/* + * Register aliases. + */ +lr .req x30 /* link register */ + +#endif /* __ASM_ARM_ARM64_MACROS_H */ + diff --git a/xen/arch/arm/include/asm/arm64/mm.h b/xen/arch/arm/include/asm/arm64/mm.h new file mode 100644 index 0000000000..d0a3be7e15 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/mm.h @@ -0,0 +1,23 @@ +#ifndef __ARM_ARM64_MM_H__ +#define __ARM_ARM64_MM_H__ + +/* + * On ARM64, all the RAM is currently direct mapped in Xen. + * Hence return always true. 
+ */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + return true; +} + +#endif /* __ARM_ARM64_MM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/page.h b/xen/arch/arm/include/asm/arm64/page.h new file mode 100644 index 0000000000..0cba266373 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/page.h @@ -0,0 +1,103 @@ +#ifndef __ARM_ARM64_PAGE_H__ +#define __ARM_ARM64_PAGE_H__ + +#ifndef __ASSEMBLY__ + +#include + +/* Write a pagetable entry */ +static inline void write_pte(lpae_t *p, lpae_t pte) +{ + asm volatile ( + /* Ensure any writes have completed with the old mappings. */ + "dsb sy;" + "str %0, [%1];" /* Write the entry */ + "dsb sy;" + : : "r" (pte.bits), "r" (p) : "memory"); +} + +/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */ +#define __invalidate_dcache_one(R) "dc ivac, %" #R ";" + +/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ +#define __clean_dcache_one(R) \ + ALTERNATIVE("dc cvac, %" #R ";", \ + "dc civac, %" #R ";", \ + ARM64_WORKAROUND_CLEAN_CACHE) \ + +/* Inline ASM to clean and invalidate dcache on register R (may be an + * inline asm operand) */ +#define __clean_and_invalidate_dcache_one(R) "dc civac, %" #R ";" + +/* Invalidate all instruction caches in Inner Shareable domain to PoU */ +static inline void invalidate_icache(void) +{ + asm volatile ("ic ialluis"); + dsb(ish); /* Ensure completion of the flush I-cache */ + isb(); +} + +/* Invalidate all instruction caches on the local processor to PoU */ +static inline void invalidate_icache_local(void) +{ + asm volatile ("ic iallu"); + dsb(nsh); /* Ensure completion of the I-cache flush */ + isb(); +} + +/* Ask the MMU to translate a VA for us */ +static inline uint64_t __va_to_par(vaddr_t va) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + asm volatile ("at s1e2r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + +/* Ask the MMU to translate a Guest VA for us */ +static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + asm volatile ("at s12e1w, %0;" : : "r" (va)); + else + asm volatile ("at s12e1r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + +static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + if ( (flags & GV2M_WRITE) == GV2M_WRITE ) + asm volatile ("at s1e1w, %0;" : : "r" (va)); + else + asm volatile ("at s1e1r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + +extern void clear_page(void *to); + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_ARM64_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/processor.h b/xen/arch/arm/include/asm/arm64/processor.h new file mode 100644 index 0000000000..c749f80ad9 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/processor.h @@ -0,0 +1,99 @@ +#ifndef __ASM_ARM_ARM64_PROCESSOR_H +#define __ASM_ARM_ARM64_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). 
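+ *
+ * For instance, __DECL_REG(x0, r0) expands to
+ *
+ *     union { uint64_t x0; uint32_t r0; };
+ *
+ * so regs->x0 and regs->r0 name the same storage, the 32-bit name
+ * aliasing the low half of the 64-bit register.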
*/ + +#define __DECL_REG(n64, n32) union { \ + uint64_t n64; \ + uint32_t n32; \ +} + +/* On stack VCPU state */ +struct cpu_user_regs +{ + /* + * The mapping AArch64 <-> AArch32 is based on D1.20.1 in ARM DDI + * 0487A.d. + * + * AArch64 AArch32 + */ + __DECL_REG(x0, r0/*_usr*/); + __DECL_REG(x1, r1/*_usr*/); + __DECL_REG(x2, r2/*_usr*/); + __DECL_REG(x3, r3/*_usr*/); + __DECL_REG(x4, r4/*_usr*/); + __DECL_REG(x5, r5/*_usr*/); + __DECL_REG(x6, r6/*_usr*/); + __DECL_REG(x7, r7/*_usr*/); + __DECL_REG(x8, r8/*_usr*/); + __DECL_REG(x9, r9/*_usr*/); + __DECL_REG(x10, r10/*_usr*/); + __DECL_REG(x11 , r11/*_usr*/); + __DECL_REG(x12, r12/*_usr*/); + + __DECL_REG(x13, /* r13_usr */ sp_usr); + __DECL_REG(x14, /* r14_usr */ lr_usr); + + __DECL_REG(x15, /* r13_hyp */ __unused_sp_hyp); + + __DECL_REG(x16, /* r14_irq */ lr_irq); + __DECL_REG(x17, /* r13_irq */ sp_irq); + + __DECL_REG(x18, /* r14_svc */ lr_svc); + __DECL_REG(x19, /* r13_svc */ sp_svc); + + __DECL_REG(x20, /* r14_abt */ lr_abt); + __DECL_REG(x21, /* r13_abt */ sp_abt); + + __DECL_REG(x22, /* r14_und */ lr_und); + __DECL_REG(x23, /* r13_und */ sp_und); + + __DECL_REG(x24, r8_fiq); + __DECL_REG(x25, r9_fiq); + __DECL_REG(x26, r10_fiq); + __DECL_REG(x27, r11_fiq); + __DECL_REG(x28, r12_fiq); + __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq); + + __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq); + + register_t sp; /* Valid for hypervisor frames */ + + /* Return address and mode */ + __DECL_REG(pc, pc32); /* ELR_EL2 */ + uint64_t cpsr; /* SPSR_EL2 */ + uint64_t hsr; /* ESR_EL2 */ + + /* The kernel frame should be 16-byte aligned. */ + uint64_t pad0; + + /* Outer guest frame only from here on... */ + + union { + uint64_t spsr_el1; /* AArch64 */ + uint32_t spsr_svc; /* AArch32 */ + }; + + /* AArch32 guests only */ + uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; + + /* AArch64 guests only */ + uint64_t sp_el0; + uint64_t sp_el1, elr_el1; +}; + +#undef __DECL_REG + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_ARM64_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/sysregs.h b/xen/arch/arm/include/asm/arm64/sysregs.h new file mode 100644 index 0000000000..d7e4772f21 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/sysregs.h @@ -0,0 +1,423 @@ +#ifndef __ASM_ARM_ARM64_SYSREGS_H +#define __ASM_ARM_ARM64_SYSREGS_H + +#include + +/* + * GIC System register assembly aliases picked from kernel + */ +#define ICC_PMR_EL1 S3_0_C4_C6_0 +#define ICC_DIR_EL1 S3_0_C12_C11_1 +#define ICC_SGI1R_EL1 S3_0_C12_C11_5 +#define ICC_EOIR1_EL1 S3_0_C12_C12_1 +#define ICC_IAR1_EL1 S3_0_C12_C12_0 +#define ICC_BPR1_EL1 S3_0_C12_C12_3 +#define ICC_CTLR_EL1 S3_0_C12_C12_4 +#define ICC_SRE_EL1 S3_0_C12_C12_5 +#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7 + +#define ICH_VSEIR_EL2 S3_4_C12_C9_4 +#define ICC_SRE_EL2 S3_4_C12_C9_5 +#define ICH_HCR_EL2 S3_4_C12_C11_0 +#define ICH_VTR_EL2 S3_4_C12_C11_1 +#define ICH_MISR_EL2 S3_4_C12_C11_2 +#define ICH_EISR_EL2 S3_4_C12_C11_3 +#define ICH_ELSR_EL2 S3_4_C12_C11_5 +#define ICH_VMCR_EL2 S3_4_C12_C11_7 + +#define __LR0_EL2(x) S3_4_C12_C12_ ## x +#define __LR8_EL2(x) S3_4_C12_C13_ ## x + +#define ICH_LR0_EL2 __LR0_EL2(0) +#define ICH_LR1_EL2 __LR0_EL2(1) +#define ICH_LR2_EL2 __LR0_EL2(2) +#define ICH_LR3_EL2 __LR0_EL2(3) +#define ICH_LR4_EL2 __LR0_EL2(4) +#define ICH_LR5_EL2 __LR0_EL2(5) +#define ICH_LR6_EL2 __LR0_EL2(6) +#define ICH_LR7_EL2 __LR0_EL2(7) +#define ICH_LR8_EL2 __LR8_EL2(0) +#define ICH_LR9_EL2 
__LR8_EL2(1) +#define ICH_LR10_EL2 __LR8_EL2(2) +#define ICH_LR11_EL2 __LR8_EL2(3) +#define ICH_LR12_EL2 __LR8_EL2(4) +#define ICH_LR13_EL2 __LR8_EL2(5) +#define ICH_LR14_EL2 __LR8_EL2(6) +#define ICH_LR15_EL2 __LR8_EL2(7) + +#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x +#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) +#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) +#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) +#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) + +#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x +#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) +#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) +#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) +#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) + +/* + * Define ID coprocessor registers if they are not + * already defined by the compiler. + * + * Values picked from linux kernel + */ +#ifndef ID_AA64MMFR2_EL1 +#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2 +#endif +#ifndef ID_PFR2_EL1 +#define ID_PFR2_EL1 S3_0_C0_C3_4 +#endif +#ifndef ID_MMFR4_EL1 +#define ID_MMFR4_EL1 S3_0_C0_C2_6 +#endif +#ifndef ID_MMFR5_EL1 +#define ID_MMFR5_EL1 S3_0_C0_C3_6 +#endif +#ifndef ID_ISAR6_EL1 +#define ID_ISAR6_EL1 S3_0_C0_C2_7 +#endif +#ifndef ID_AA64ZFR0_EL1 +#define ID_AA64ZFR0_EL1 S3_0_C0_C4_4 +#endif +#ifndef ID_DFR1_EL1 +#define ID_DFR1_EL1 S3_0_C0_C3_5 +#endif + +/* ID registers (imported from arm64/include/asm/sysreg.h in Linux) */ + +/* id_aa64isar0 */ +#define ID_AA64ISAR0_RNDR_SHIFT 60 +#define ID_AA64ISAR0_TLB_SHIFT 56 +#define ID_AA64ISAR0_TS_SHIFT 52 +#define ID_AA64ISAR0_FHM_SHIFT 48 +#define ID_AA64ISAR0_DP_SHIFT 44 +#define ID_AA64ISAR0_SM4_SHIFT 40 +#define ID_AA64ISAR0_SM3_SHIFT 36 +#define ID_AA64ISAR0_SHA3_SHIFT 32 +#define ID_AA64ISAR0_RDM_SHIFT 28 +#define ID_AA64ISAR0_ATOMICS_SHIFT 20 +#define ID_AA64ISAR0_CRC32_SHIFT 16 +#define ID_AA64ISAR0_SHA2_SHIFT 12 +#define ID_AA64ISAR0_SHA1_SHIFT 8 +#define ID_AA64ISAR0_AES_SHIFT 4 + +#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 +#define ID_AA64ISAR0_TLB_RANGE 0x2 + +/* id_aa64isar1 */ +#define ID_AA64ISAR1_I8MM_SHIFT 52 +#define ID_AA64ISAR1_DGH_SHIFT 48 +#define ID_AA64ISAR1_BF16_SHIFT 44 +#define ID_AA64ISAR1_SPECRES_SHIFT 40 +#define ID_AA64ISAR1_SB_SHIFT 36 +#define ID_AA64ISAR1_FRINTTS_SHIFT 32 +#define ID_AA64ISAR1_GPI_SHIFT 28 +#define ID_AA64ISAR1_GPA_SHIFT 24 +#define ID_AA64ISAR1_LRCPC_SHIFT 20 +#define ID_AA64ISAR1_FCMA_SHIFT 16 +#define ID_AA64ISAR1_JSCVT_SHIFT 12 +#define ID_AA64ISAR1_API_SHIFT 8 +#define ID_AA64ISAR1_APA_SHIFT 4 +#define ID_AA64ISAR1_DPB_SHIFT 0 + +#define ID_AA64ISAR1_APA_NI 0x0 +#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 +#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_API_NI 0x0 +#define ID_AA64ISAR1_API_IMP_DEF 0x1 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_GPA_NI 0x0 +#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_GPI_NI 0x0 +#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 + +/* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 +#define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_AMU_SHIFT 44 +#define ID_AA64PFR0_MPAM_SHIFT 40 +#define ID_AA64PFR0_SEL2_SHIFT 36 +#define ID_AA64PFR0_SVE_SHIFT 32 +#define ID_AA64PFR0_RAS_SHIFT 28 +#define ID_AA64PFR0_GIC_SHIFT 24 +#define ID_AA64PFR0_ASIMD_SHIFT 20 +#define ID_AA64PFR0_FP_SHIFT 16 +#define ID_AA64PFR0_EL3_SHIFT 12 +#define ID_AA64PFR0_EL2_SHIFT 8 +#define 
ID_AA64PFR0_EL1_SHIFT 4 +#define ID_AA64PFR0_EL0_SHIFT 0 + +#define ID_AA64PFR0_AMU 0x1 +#define ID_AA64PFR0_SVE 0x1 +#define ID_AA64PFR0_RAS_V1 0x1 +#define ID_AA64PFR0_FP_NI 0xf +#define ID_AA64PFR0_FP_SUPPORTED 0x0 +#define ID_AA64PFR0_ASIMD_NI 0xf +#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 +#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL1_32BIT_64BIT 0x2 +#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2 + +/* id_aa64pfr1 */ +#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 +#define ID_AA64PFR1_RASFRAC_SHIFT 12 +#define ID_AA64PFR1_MTE_SHIFT 8 +#define ID_AA64PFR1_SSBS_SHIFT 4 +#define ID_AA64PFR1_BT_SHIFT 0 + +#define ID_AA64PFR1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 +#define ID_AA64PFR1_BT_BTI 0x1 + +#define ID_AA64PFR1_MTE_NI 0x0 +#define ID_AA64PFR1_MTE_EL0 0x1 +#define ID_AA64PFR1_MTE 0x2 + +/* id_aa64zfr0 */ +#define ID_AA64ZFR0_F64MM_SHIFT 56 +#define ID_AA64ZFR0_F32MM_SHIFT 52 +#define ID_AA64ZFR0_I8MM_SHIFT 44 +#define ID_AA64ZFR0_SM4_SHIFT 40 +#define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BF16_SHIFT 20 +#define ID_AA64ZFR0_BITPERM_SHIFT 16 +#define ID_AA64ZFR0_AES_SHIFT 4 +#define ID_AA64ZFR0_SVEVER_SHIFT 0 + +#define ID_AA64ZFR0_F64MM 0x1 +#define ID_AA64ZFR0_F32MM 0x1 +#define ID_AA64ZFR0_I8MM 0x1 +#define ID_AA64ZFR0_BF16 0x1 +#define ID_AA64ZFR0_SM4 0x1 +#define ID_AA64ZFR0_SHA3 0x1 +#define ID_AA64ZFR0_BITPERM 0x1 +#define ID_AA64ZFR0_AES 0x1 +#define ID_AA64ZFR0_AES_PMULL 0x2 +#define ID_AA64ZFR0_SVEVER_SVE2 0x1 + +/* id_aa64mmfr0 */ +#define ID_AA64MMFR0_ECV_SHIFT 60 +#define ID_AA64MMFR0_FGT_SHIFT 56 +#define ID_AA64MMFR0_EXS_SHIFT 44 +#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 +#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 +#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 +#define ID_AA64MMFR0_TGRAN4_SHIFT 28 +#define ID_AA64MMFR0_TGRAN64_SHIFT 24 +#define ID_AA64MMFR0_TGRAN16_SHIFT 20 +#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 +#define ID_AA64MMFR0_SNSMEM_SHIFT 12 +#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 +#define ID_AA64MMFR0_ASID_SHIFT 4 +#define ID_AA64MMFR0_PARANGE_SHIFT 0 + +#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 +#define ID_AA64MMFR0_PARANGE_48 0x5 +#define ID_AA64MMFR0_PARANGE_52 0x6 + +/* id_aa64mmfr1 */ +#define ID_AA64MMFR1_ETS_SHIFT 36 +#define ID_AA64MMFR1_TWED_SHIFT 32 +#define ID_AA64MMFR1_XNX_SHIFT 28 +#define ID_AA64MMFR1_SPECSEI_SHIFT 24 +#define ID_AA64MMFR1_PAN_SHIFT 20 +#define ID_AA64MMFR1_LOR_SHIFT 16 +#define ID_AA64MMFR1_HPD_SHIFT 12 +#define ID_AA64MMFR1_VHE_SHIFT 8 +#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 +#define ID_AA64MMFR1_HADBS_SHIFT 0 + +#define ID_AA64MMFR1_VMIDBITS_8 0 +#define ID_AA64MMFR1_VMIDBITS_16 2 + +/* id_aa64mmfr2 */ +#define ID_AA64MMFR2_E0PD_SHIFT 60 +#define ID_AA64MMFR2_EVT_SHIFT 56 +#define ID_AA64MMFR2_BBM_SHIFT 52 +#define ID_AA64MMFR2_TTL_SHIFT 48 +#define ID_AA64MMFR2_FWB_SHIFT 40 +#define ID_AA64MMFR2_IDS_SHIFT 36 +#define ID_AA64MMFR2_AT_SHIFT 32 +#define ID_AA64MMFR2_ST_SHIFT 28 +#define ID_AA64MMFR2_NV_SHIFT 24 +#define ID_AA64MMFR2_CCIDX_SHIFT 20 +#define ID_AA64MMFR2_LVA_SHIFT 16 +#define ID_AA64MMFR2_IESB_SHIFT 12 +#define ID_AA64MMFR2_LSM_SHIFT 8 +#define ID_AA64MMFR2_UAO_SHIFT 4 +#define ID_AA64MMFR2_CNP_SHIFT 0 + +/* id_aa64dfr0 */ +#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 +#define ID_AA64DFR0_PMSVER_SHIFT 32 +#define 
ID_AA64DFR0_CTX_CMPS_SHIFT 28 +#define ID_AA64DFR0_WRPS_SHIFT 20 +#define ID_AA64DFR0_BRPS_SHIFT 12 +#define ID_AA64DFR0_PMUVER_SHIFT 8 +#define ID_AA64DFR0_TRACEVER_SHIFT 4 +#define ID_AA64DFR0_DEBUGVER_SHIFT 0 + +#define ID_AA64DFR0_PMUVER_8_0 0x1 +#define ID_AA64DFR0_PMUVER_8_1 0x4 +#define ID_AA64DFR0_PMUVER_8_4 0x5 +#define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf + +#define ID_DFR0_PERFMON_SHIFT 24 + +#define ID_DFR0_PERFMON_8_1 0x4 + +#define ID_ISAR4_SWP_FRAC_SHIFT 28 +#define ID_ISAR4_PSR_M_SHIFT 24 +#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20 +#define ID_ISAR4_BARRIER_SHIFT 16 +#define ID_ISAR4_SMC_SHIFT 12 +#define ID_ISAR4_WRITEBACK_SHIFT 8 +#define ID_ISAR4_WITHSHIFTS_SHIFT 4 +#define ID_ISAR4_UNPRIV_SHIFT 0 + +#define ID_DFR1_MTPMU_SHIFT 0 + +#define ID_ISAR0_DIVIDE_SHIFT 24 +#define ID_ISAR0_DEBUG_SHIFT 20 +#define ID_ISAR0_COPROC_SHIFT 16 +#define ID_ISAR0_CMPBRANCH_SHIFT 12 +#define ID_ISAR0_BITFIELD_SHIFT 8 +#define ID_ISAR0_BITCOUNT_SHIFT 4 +#define ID_ISAR0_SWAP_SHIFT 0 + +#define ID_ISAR5_RDM_SHIFT 24 +#define ID_ISAR5_CRC32_SHIFT 16 +#define ID_ISAR5_SHA2_SHIFT 12 +#define ID_ISAR5_SHA1_SHIFT 8 +#define ID_ISAR5_AES_SHIFT 4 +#define ID_ISAR5_SEVL_SHIFT 0 + +#define ID_ISAR6_I8MM_SHIFT 24 +#define ID_ISAR6_BF16_SHIFT 20 +#define ID_ISAR6_SPECRES_SHIFT 16 +#define ID_ISAR6_SB_SHIFT 12 +#define ID_ISAR6_FHM_SHIFT 8 +#define ID_ISAR6_DP_SHIFT 4 +#define ID_ISAR6_JSCVT_SHIFT 0 + +#define ID_MMFR0_INNERSHR_SHIFT 28 +#define ID_MMFR0_FCSE_SHIFT 24 +#define ID_MMFR0_AUXREG_SHIFT 20 +#define ID_MMFR0_TCM_SHIFT 16 +#define ID_MMFR0_SHARELVL_SHIFT 12 +#define ID_MMFR0_OUTERSHR_SHIFT 8 +#define ID_MMFR0_PMSA_SHIFT 4 +#define ID_MMFR0_VMSA_SHIFT 0 + +#define ID_MMFR4_EVT_SHIFT 28 +#define ID_MMFR4_CCIDX_SHIFT 24 +#define ID_MMFR4_LSM_SHIFT 20 +#define ID_MMFR4_HPDS_SHIFT 16 +#define ID_MMFR4_CNP_SHIFT 12 +#define ID_MMFR4_XNX_SHIFT 8 +#define ID_MMFR4_AC2_SHIFT 4 +#define ID_MMFR4_SPECSEI_SHIFT 0 + +#define ID_MMFR5_ETS_SHIFT 0 + +#define ID_PFR0_DIT_SHIFT 24 +#define ID_PFR0_CSV2_SHIFT 16 +#define ID_PFR0_STATE3_SHIFT 12 +#define ID_PFR0_STATE2_SHIFT 8 +#define ID_PFR0_STATE1_SHIFT 4 +#define ID_PFR0_STATE0_SHIFT 0 + +#define ID_DFR0_PERFMON_SHIFT 24 +#define ID_DFR0_MPROFDBG_SHIFT 20 +#define ID_DFR0_MMAPTRC_SHIFT 16 +#define ID_DFR0_COPTRC_SHIFT 12 +#define ID_DFR0_MMAPDBG_SHIFT 8 +#define ID_DFR0_COPSDBG_SHIFT 4 +#define ID_DFR0_COPDBG_SHIFT 0 + +#define ID_PFR2_SSBS_SHIFT 4 +#define ID_PFR2_CSV3_SHIFT 0 + +#define MVFR0_FPROUND_SHIFT 28 +#define MVFR0_FPSHVEC_SHIFT 24 +#define MVFR0_FPSQRT_SHIFT 20 +#define MVFR0_FPDIVIDE_SHIFT 16 +#define MVFR0_FPTRAP_SHIFT 12 +#define MVFR0_FPDP_SHIFT 8 +#define MVFR0_FPSP_SHIFT 4 +#define MVFR0_SIMD_SHIFT 0 + +#define MVFR1_SIMDFMAC_SHIFT 28 +#define MVFR1_FPHP_SHIFT 24 +#define MVFR1_SIMDHP_SHIFT 20 +#define MVFR1_SIMDSP_SHIFT 16 +#define MVFR1_SIMDINT_SHIFT 12 +#define MVFR1_SIMDLS_SHIFT 8 +#define MVFR1_FPDNAN_SHIFT 4 +#define MVFR1_FPFTZ_SHIFT 0 + +#define ID_PFR1_GIC_SHIFT 28 +#define ID_PFR1_VIRT_FRAC_SHIFT 24 +#define ID_PFR1_SEC_FRAC_SHIFT 20 +#define ID_PFR1_GENTIMER_SHIFT 16 +#define ID_PFR1_VIRTUALIZATION_SHIFT 12 +#define ID_PFR1_MPROGMOD_SHIFT 8 +#define ID_PFR1_SECURITY_SHIFT 4 +#define ID_PFR1_PROGMOD_SHIFT 0 + +#define MVFR2_FPMISC_SHIFT 4 +#define MVFR2_SIMDMISC_SHIFT 0 + +#define DCZID_DZP_SHIFT 4 +#define DCZID_BS_SHIFT 0 + +/* + * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which + * are reserved by the SVE architecture for future expansion of the LEN + * field, with compatible 
semantics. + */ +#define ZCR_ELx_LEN_SHIFT 0 +#define ZCR_ELx_LEN_SIZE 9 +#define ZCR_ELx_LEN_MASK 0x1ff + +/* Access to system registers */ + +#define WRITE_SYSREG64(v, name) do { \ + uint64_t _r = v; \ + asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \ +} while (0) +#define READ_SYSREG64(name) ({ \ + uint64_t _r; \ + asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \ + _r; }) + +#define READ_SYSREG(name) READ_SYSREG64(name) +#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name) + +#endif /* _ASM_ARM_ARM64_SYSREGS_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/arm64/system.h b/xen/arch/arm/include/asm/arm64/system.h new file mode 100644 index 0000000000..2e36573ac6 --- /dev/null +++ b/xen/arch/arm/include/asm/arm64/system.h @@ -0,0 +1,91 @@ +/* Portions taken from Linux arch arm64 */ +#ifndef __ASM_ARM64_SYSTEM_H +#define __ASM_ARM64_SYSTEM_H + +#include + +/* Uses uimm4 as a bitmask to select the clearing of one or more of + * the DAIF exception mask bits: + * bit 3 selects the D mask, + * bit 2 the A mask, + * bit 1 the I mask and + * bit 0 the F mask. +*/ + +#define local_fiq_disable() asm volatile ( "msr daifset, #1\n" ::: "memory" ) +#define local_fiq_enable() asm volatile ( "msr daifclr, #1\n" ::: "memory" ) +#define local_irq_disable() asm volatile ( "msr daifset, #2\n" ::: "memory" ) +#define local_irq_enable() asm volatile ( "msr daifclr, #2\n" ::: "memory" ) +#define local_abort_disable() asm volatile ( "msr daifset, #4\n" ::: "memory" ) +#define local_abort_enable() asm volatile ( "msr daifclr, #4\n" ::: "memory" ) + +#define local_save_flags(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile( \ + "mrs %0, daif // local_save_flags\n" \ + : "=r" (x) \ + : \ + : "memory"); \ +}) + +#define local_irq_save(x) \ +({ \ + local_save_flags(x); \ + local_irq_disable(); \ +}) +#define local_irq_restore(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( \ + "msr daif, %0 // local_irq_restore" \ + : \ + : "r" (x) \ + : "memory"); \ +}) + +static inline int local_irq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_IRQ_MASK); +} + +static inline int local_fiq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_FIQ_MASK); +} + +#define csdb() asm volatile ( "hint #20" : : : "memory" ) + +/* + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz + * and 0 otherwise. 
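+ *
+ * Typical use (a sketch): clamp an attacker-influenced index before the
+ * dependent load, e.g.
+ *
+ *     idx &= array_index_mask_nospec(idx, ARRAY_SIZE(tbl));  // 'tbl' assumed
+ *     val = tbl[idx];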
+ */
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+                                                    unsigned long sz)
+{
+    unsigned long mask;
+
+    asm volatile ( "cmp     %1, %2\n"
+                   "sbc     %0, xzr, xzr\n"
+                   : "=r" (mask)
+                   : "r" (idx), "Ir" (sz)
+                   : "cc" );
+    csdb();
+
+    return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/arm64/traps.h b/xen/arch/arm/include/asm/arm64/traps.h
new file mode 100644
index 0000000000..2379b578cb
--- /dev/null
+++ b/xen/arch/arm/include/asm/arm64/traps.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_ARM64_TRAPS__
+#define __ASM_ARM64_TRAPS__
+
+void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len);
+
+void do_sysreg(struct cpu_user_regs *regs,
+               const union hsr hsr);
+
+#endif /* __ASM_ARM64_TRAPS__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+
diff --git a/xen/arch/arm/include/asm/arm64/vfp.h b/xen/arch/arm/include/asm/arm64/vfp.h
new file mode 100644
index 0000000000..e6e8c363bc
--- /dev/null
+++ b/xen/arch/arm/include/asm/arm64/vfp.h
@@ -0,0 +1,23 @@
+#ifndef _ARM_ARM64_VFP_H
+#define _ARM_ARM64_VFP_H
+
+/* ARM64 VFP instructions require the fpregs address to be 128-bit aligned */
+#define __vfp_aligned __attribute__((aligned(16)))
+
+struct vfp_state
+{
+    uint64_t fpregs[64] __vfp_aligned;
+    register_t fpcr;
+    register_t fpexc32_el2;
+    register_t fpsr;
+};
+
+#endif /* _ARM_ARM64_VFP_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/asm_defns.h b/xen/arch/arm/include/asm/asm_defns.h
new file mode 100644
index 0000000000..29a9dbb002
--- /dev/null
+++ b/xen/arch/arm/include/asm/asm_defns.h
@@ -0,0 +1,44 @@
+#ifndef __ARM_ASM_DEFNS_H__
+#define __ARM_ASM_DEFNS_H__
+
+#ifndef COMPILE_OFFSETS
+/* NB. Auto-generated from arch/.../asm-offsets.c */
+#include <asm/asm-offsets.h>
+#endif
+#include
+
+/* Macros for generic assembly code */
+#if defined(CONFIG_ARM_32)
+# define __OP32
+# define ASM_REG(index) asm("r" # index)
+#elif defined(CONFIG_ARM_64)
+# define __OP32 "w"
+/*
+ * Clang < 8.0 doesn't support register allocation using the syntax rN.
+ * See https://reviews.llvm.org/rL328829.
+ */
+# define ASM_REG(index) asm("x" # index)
+#else
+# error "unknown ARM variant"
+#endif
+
+#define RODATA_STR(label, msg)                  \
+.pushsection .rodata.str, "aMS", %progbits, 1 ; \
+label:  .asciz msg;                             \
+.popsection
+
+#define ASM_INT(label, val)                 \
+    .p2align 2;                             \
+label: .long (val);                         \
+    .size label, .
- label; \ + .type label, %object + +#endif /* __ARM_ASM_DEFNS_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/atomic.h b/xen/arch/arm/include/asm/atomic.h new file mode 100644 index 0000000000..ac2798d095 --- /dev/null +++ b/xen/arch/arm/include/asm/atomic.h @@ -0,0 +1,236 @@ +#ifndef __ARCH_ARM_ATOMIC__ +#define __ARCH_ARM_ATOMIC__ + +#include +#include +#include + +#define build_atomic_read(name, size, width, type) \ +static inline type name(const volatile type *addr) \ +{ \ + type ret; \ + asm volatile("ldr" size " %" width(0) ",%1" \ + : "=r" (ret) \ + : "m" (*addr)); \ + return ret; \ +} + +#define build_atomic_write(name, size, width, type) \ +static inline void name(volatile type *addr, type val) \ +{ \ + asm volatile("str" size " %" width(1) ",%0" \ + : "=m" (*addr) \ + : "r" (val)); \ +} + +#define build_add_sized(name, size, width, type) \ +static inline void name(volatile type *addr, type val) \ +{ \ + type t; \ + asm volatile("ldr" size " %" width(1) ",%0\n" \ + "add %" width(1) ",%" width(1) ",%" width(2) "\n" \ + "str" size " %" width(1) ",%0" \ + : "+m" (*addr), "=&r" (t) \ + : "ri" (val)); \ +} + +#if defined (CONFIG_ARM_32) +#define BYTE(n) #n +#define WORD(n) #n +#define DWORD(n) "" #n ",%H" #n +#define PAIR "d" +#elif defined (CONFIG_ARM_64) +#define BYTE(n) "w" #n +#define WORD(n) "w" #n +#define DWORD(n) "" #n +#define PAIR "" +#endif + +build_atomic_read(read_u8_atomic, "b", BYTE, uint8_t) +build_atomic_read(read_u16_atomic, "h", WORD, uint16_t) +build_atomic_read(read_u32_atomic, "", WORD, uint32_t) +build_atomic_read(read_u64_atomic, PAIR, DWORD, uint64_t) +build_atomic_read(read_int_atomic, "", WORD, int) + +build_atomic_write(write_u8_atomic, "b", BYTE, uint8_t) +build_atomic_write(write_u16_atomic, "h", WORD, uint16_t) +build_atomic_write(write_u32_atomic, "", WORD, uint32_t) +build_atomic_write(write_u64_atomic, PAIR, DWORD, uint64_t) +build_atomic_write(write_int_atomic, "", WORD, int) + +build_add_sized(add_u8_sized, "b", BYTE, uint8_t) +build_add_sized(add_u16_sized, "h", WORD, uint16_t) +build_add_sized(add_u32_sized, "", WORD, uint32_t) + +#undef BYTE +#undef WORD +#undef DWORD +#undef PAIR + +#undef build_atomic_read +#undef build_atomic_write +#undef build_add_sized + +void __bad_atomic_read(const volatile void *p, void *res); +void __bad_atomic_size(void); + +static always_inline void read_atomic_size(const volatile void *p, + void *res, + unsigned int size) +{ + switch ( size ) + { + case 1: + *(uint8_t *)res = read_u8_atomic(p); + break; + case 2: + *(uint16_t *)res = read_u16_atomic(p); + break; + case 4: + *(uint32_t *)res = read_u32_atomic(p); + break; + case 8: + *(uint64_t *)res = read_u64_atomic(p); + break; + default: + __bad_atomic_read(p, res); + break; + } +} + +static always_inline void write_atomic_size(volatile void *p, + void *val, + unsigned int size) +{ + switch ( size ) + { + case 1: + write_u8_atomic(p, *(uint8_t *)val); + break; + case 2: + write_u16_atomic(p, *(uint16_t *)val); + break; + case 4: + write_u32_atomic(p, *(uint32_t *)val); + break; + case 8: + write_u64_atomic(p, *(uint64_t *)val); + break; + default: + __bad_atomic_size(); + break; + } +} + +#define read_atomic(p) ({ \ + union { typeof(*p) val; char c[0]; } x_; \ + read_atomic_size(p, x_.c, sizeof(*p)); \ + x_.val; \ +}) + +#define write_atomic(p, x) \ + do { \ + typeof(*p) x_ = (x); \ + write_atomic_size(p, &x_, sizeof(*p)); \ + } while ( false ) + 
+#define add_sized(p, x) ({ \ + typeof(*(p)) __x = (x); \ + switch ( sizeof(*(p)) ) \ + { \ + case 1: add_u8_sized((uint8_t *)(p), __x); break; \ + case 2: add_u16_sized((uint16_t *)(p), __x); break; \ + case 4: add_u32_sized((uint32_t *)(p), __x); break; \ + default: __bad_atomic_size(); break; \ + } \ +}) + +/* + * On ARM, ordinary assignment (str instruction) doesn't clear the local + * strex/ldrex monitor on some implementations. The reason we can use it for + * atomic_set() is the clrex or dummy strex done on every exception return. + */ +static inline int atomic_read(const atomic_t *v) +{ + return *(volatile int *)&v->counter; +} + +static inline int _atomic_read(atomic_t v) +{ + return v.counter; +} + +static inline void atomic_set(atomic_t *v, int i) +{ + v->counter = i; +} + +static inline void _atomic_set(atomic_t *v, int i) +{ + v->counter = i; +} + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +static inline int atomic_sub_and_test(int i, atomic_t *v) +{ + return atomic_sub_return(i, v) == 0; +} + +static inline void atomic_inc(atomic_t *v) +{ + atomic_add(1, v); +} + +static inline int atomic_inc_return(atomic_t *v) +{ + return atomic_add_return(1, v); +} + +static inline int atomic_inc_and_test(atomic_t *v) +{ + return atomic_add_return(1, v) == 0; +} + +static inline void atomic_dec(atomic_t *v) +{ + atomic_sub(1, v); +} + +static inline int atomic_dec_return(atomic_t *v) +{ + return atomic_sub_return(1, v); +} + +static inline int atomic_dec_and_test(atomic_t *v) +{ + return atomic_sub_return(1, v) == 0; +} + +static inline int atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + return __atomic_add_unless(v, a, u); +} + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +#endif /* __ARCH_ARM_ATOMIC__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/bitops.h b/xen/arch/arm/include/asm/bitops.h new file mode 100644 index 0000000000..71ae14cab3 --- /dev/null +++ b/xen/arch/arm/include/asm/bitops.h @@ -0,0 +1,192 @@ +/* + * Copyright 1995, Russell King. + * Various bits and pieces copyrights include: + * Linus Torvalds (test_bit). + * Big endian support: Copyright 2001, Nicolas Pitre + * reworked by rmk. + */ + +#ifndef _ARM_BITOPS_H +#define _ARM_BITOPS_H + +#include + +/* + * Non-atomic bit manipulation. + * + * Implemented using atomics to be interrupt safe. Could alternatively + * implement with local interrupt masking. + */ +#define __set_bit(n,p) set_bit(n,p) +#define __clear_bit(n,p) clear_bit(n,p) + +#define BITOP_BITS_PER_WORD 32 +#define BITOP_MASK(nr) (1UL << ((nr) % BITOP_BITS_PER_WORD)) +#define BITOP_WORD(nr) ((nr) / BITOP_BITS_PER_WORD) +#define BITS_PER_BYTE 8 + +#define ADDR (*(volatile int *) addr) +#define CONST_ADDR (*(const volatile int *) addr) + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +/* + * Atomic bitops + * + * The helpers below *should* only be used on memory shared between + * trusted threads or we know the memory cannot be accessed by another + * thread. 
+ */ + +void set_bit(int nr, volatile void *p); +void clear_bit(int nr, volatile void *p); +void change_bit(int nr, volatile void *p); +int test_and_set_bit(int nr, volatile void *p); +int test_and_clear_bit(int nr, volatile void *p); +int test_and_change_bit(int nr, volatile void *p); + +void clear_mask16(uint16_t mask, volatile void *p); + +/* + * The helpers below may fail to update the memory if the action takes + * too long. + * + * @max_try: Maximum number of iterations + * + * The helpers will return true when the update has succeeded (i.e no + * timeout) and false if the update has failed. + */ +bool set_bit_timeout(int nr, volatile void *p, unsigned int max_try); +bool clear_bit_timeout(int nr, volatile void *p, unsigned int max_try); +bool change_bit_timeout(int nr, volatile void *p, unsigned int max_try); +bool test_and_set_bit_timeout(int nr, volatile void *p, + int *oldbit, unsigned int max_try); +bool test_and_clear_bit_timeout(int nr, volatile void *p, + int *oldbit, unsigned int max_try); +bool test_and_change_bit_timeout(int nr, volatile void *p, + int *oldbit, unsigned int max_try); +bool clear_mask16_timeout(uint16_t mask, volatile void *p, + unsigned int max_try); + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile void *addr) +{ + unsigned int mask = BITOP_MASK(nr); + volatile unsigned int *p = + ((volatile unsigned int *)addr) + BITOP_WORD(nr); + unsigned int old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile void *addr) +{ + unsigned int mask = BITOP_MASK(nr); + volatile unsigned int *p = + ((volatile unsigned int *)addr) + BITOP_WORD(nr); + unsigned int old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile void *addr) +{ + unsigned int mask = BITOP_MASK(nr); + volatile unsigned int *p = + ((volatile unsigned int *)addr) + BITOP_WORD(nr); + unsigned int old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile void *addr) +{ + const volatile unsigned int *p = (const volatile unsigned int *)addr; + return 1UL & (p[BITOP_WORD(nr)] >> (nr & (BITOP_BITS_PER_WORD-1))); +} + +/* + * On ARMv5 and above those functions can be implemented around + * the clz instruction for much better code efficiency. 
+ */ + +static inline int fls(unsigned int x) +{ + int ret; + + if (__builtin_constant_p(x)) + return generic_fls(x); + + asm("clz\t%"__OP32"0, %"__OP32"1" : "=r" (ret) : "r" (x)); + return 32 - ret; +} + + +#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); }) +#define ffsl(x) ({ unsigned long __t = (x); flsl(__t & -__t); }) + +/** + * find_first_set_bit - find the first set bit in @word + * @word: the word to search + * + * Returns the bit-number of the first set bit (first bit being 0). + * The input must *not* be zero. + */ +static inline unsigned int find_first_set_bit(unsigned long word) +{ + return ffsl(word) - 1; +} + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. + */ +#define hweight64(x) generic_hweight64(x) +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +#endif /* _ARM_BITOPS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/bug.h b/xen/arch/arm/include/asm/bug.h new file mode 100644 index 0000000000..f4088d0913 --- /dev/null +++ b/xen/arch/arm/include/asm/bug.h @@ -0,0 +1,106 @@ +#ifndef __ARM_BUG_H__ +#define __ARM_BUG_H__ + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +#define BUG_DISP_WIDTH 24 +#define BUG_LINE_LO_WIDTH (31 - BUG_DISP_WIDTH) +#define BUG_LINE_HI_WIDTH (31 - BUG_DISP_WIDTH) + +struct bug_frame { + signed int loc_disp; /* Relative address to the bug address */ + signed int file_disp; /* Relative address to the filename */ + signed int msg_disp; /* Relative address to the predicate (for ASSERT) */ + uint16_t line; /* Line number */ + uint32_t pad0:16; /* Padding for 8-bytes align */ +}; + +#define bug_loc(b) ((const void *)(b) + (b)->loc_disp) +#define bug_file(b) ((const void *)(b) + (b)->file_disp); +#define bug_line(b) ((b)->line) +#define bug_msg(b) ((const char *)(b) + (b)->msg_disp) + +#define BUGFRAME_run_fn 0 +#define BUGFRAME_warn 1 +#define BUGFRAME_bug 2 +#define BUGFRAME_assert 3 + +#define BUGFRAME_NR 4 + +/* Many versions of GCC doesn't support the asm %c parameter which would + * be preferable to this unpleasantness. We use mergeable string + * sections to avoid multiple copies of the string appearing in the + * Xen image. BUGFRAME_run_fn needs to be handled separately. + */ +#define BUG_FRAME(type, line, file, has_msg, msg) do { \ + BUILD_BUG_ON((line) >> 16); \ + BUILD_BUG_ON((type) >= BUGFRAME_NR); \ + asm ("1:"BUG_INSTR"\n" \ + ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ + "2:\t.asciz " __stringify(file) "\n" \ + "3:\n" \ + ".if " #has_msg "\n" \ + "\t.asciz " #msg "\n" \ + ".endif\n" \ + ".popsection\n" \ + ".pushsection .bug_frames." __stringify(type) ", \"a\", %progbits\n"\ + "4:\n" \ + ".p2align 2\n" \ + ".long (1b - 4b)\n" \ + ".long (2b - 4b)\n" \ + ".long (3b - 4b)\n" \ + ".hword " __stringify(line) ", 0\n" \ + ".popsection"); \ +} while (0) + +/* + * GCC will not allow to use "i" when PIE is enabled (Xen doesn't set the + * flag but instead rely on the default value from the compiler). So the + * easiest way to implement run_in_exception_handler() is to pass the to + * be called function in a fixed register. 
+ */ +#define run_in_exception_handler(fn) do { \ + asm ("mov " __stringify(BUG_FN_REG) ", %0\n" \ + "1:"BUG_INSTR"\n" \ + ".pushsection .bug_frames." __stringify(BUGFRAME_run_fn) "," \ + " \"a\", %%progbits\n" \ + "2:\n" \ + ".p2align 2\n" \ + ".long (1b - 2b)\n" \ + ".long 0, 0, 0\n" \ + ".popsection" :: "r" (fn) : __stringify(BUG_FN_REG) ); \ +} while (0) + +#define WARN() BUG_FRAME(BUGFRAME_warn, __LINE__, __FILE__, 0, "") + +#define BUG() do { \ + BUG_FRAME(BUGFRAME_bug, __LINE__, __FILE__, 0, ""); \ + unreachable(); \ +} while (0) + +#define assert_failed(msg) do { \ + BUG_FRAME(BUGFRAME_assert, __LINE__, __FILE__, 1, msg); \ + unreachable(); \ +} while (0) + +extern const struct bug_frame __start_bug_frames[], + __stop_bug_frames_0[], + __stop_bug_frames_1[], + __stop_bug_frames_2[], + __stop_bug_frames_3[]; + +#endif /* __ARM_BUG_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/byteorder.h b/xen/arch/arm/include/asm/byteorder.h new file mode 100644 index 0000000000..9c712c4788 --- /dev/null +++ b/xen/arch/arm/include/asm/byteorder.h @@ -0,0 +1,16 @@ +#ifndef __ASM_ARM_BYTEORDER_H__ +#define __ASM_ARM_BYTEORDER_H__ + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* __ASM_ARM_BYTEORDER_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/cache.h b/xen/arch/arm/include/asm/cache.h new file mode 100644 index 0000000000..240b6ae0ea --- /dev/null +++ b/xen/arch/arm/include/asm/cache.h @@ -0,0 +1,19 @@ +#ifndef __ARCH_ARM_CACHE_H +#define __ARCH_ARM_CACHE_H + + +/* L1 cache line size */ +#define L1_CACHE_SHIFT (CONFIG_ARM_L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define __read_mostly __section(".data.read_mostly") + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/cadence-uart.h b/xen/arch/arm/include/asm/cadence-uart.h new file mode 100644 index 0000000000..48680eea4b --- /dev/null +++ b/xen/arch/arm/include/asm/cadence-uart.h @@ -0,0 +1,55 @@ +/* + * xen/include/asm-arm/cadence-uart.h + * + * Written by Edgar E. Iglesias + * Copyright (C) 2015 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_ARM_CADENCE_UART_H__ +#define __ASM_ARM_CADENCE_UART_H__ + +#define R_UART_CR 0x00 +#define UART_CR_RX_RST 0x01 +#define UART_CR_TX_RST 0x02 +#define UART_CR_RX_ENABLE 0x04 +#define UART_CR_RX_DISABLE 0x08 +#define UART_CR_TX_ENABLE 0x10 +#define UART_CR_TX_DISABLE 0x20 + +#define R_UART_MR 0x04 +#define UART_MR_NO_PARITY 0x20 + +#define R_UART_IER 0x08 +#define R_UART_IDR 0x0C +#define R_UART_IMR 0x10 +#define R_UART_CISR 0x14 +#define R_UART_RTRIG 0x20 +#define R_UART_SR 0x2C +#define UART_SR_INTR_RTRIG 0x01 +#define UART_SR_INTR_REMPTY 0x02 +#define UART_SR_INTR_TEMPTY 0x08 +#define UART_SR_INTR_TFUL 0x10 + +#define R_UART_TX 0x30 +#define R_UART_RX 0x30 + +#endif /* __ASM_ARM_CADENCE_UART_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h new file mode 100644 index 0000000000..c7b7791201 --- /dev/null +++ b/xen/arch/arm/include/asm/config.h @@ -0,0 +1,207 @@ +/****************************************************************************** + * config.h + * + * A Linux-style configuration list. + */ + +#ifndef __ARM_CONFIG_H__ +#define __ARM_CONFIG_H__ + +#if defined(CONFIG_ARM_64) +# define LONG_BYTEORDER 3 +# define ELFSIZE 64 +#else +# define LONG_BYTEORDER 2 +# define ELFSIZE 32 +#endif + +#define BYTES_PER_LONG (1 << LONG_BYTEORDER) +#define BITS_PER_LONG (BYTES_PER_LONG << 3) +#define POINTER_ALIGN BYTES_PER_LONG + +#define BITS_PER_LLONG 64 + +/* xen_ulong_t is always 64 bits */ +#define BITS_PER_XEN_ULONG 64 + +#define CONFIG_PAGING_LEVELS 3 + +#define CONFIG_ARM 1 + +#define CONFIG_ARM_L1_CACHE_SHIFT 7 /* XXX */ + +#define CONFIG_SMP 1 + +#define CONFIG_IRQ_HAS_MULTIPLE_ACTION 1 + +#define CONFIG_PAGEALLOC_MAX_ORDER 18 +#define CONFIG_DOMU_MAX_ORDER 9 +#define CONFIG_HWDOM_MAX_ORDER 10 + +#define OPT_CONSOLE_STR "dtuart" + +#ifdef CONFIG_ARM_64 +#define MAX_VIRT_CPUS 128u +#else +#define MAX_VIRT_CPUS 8u +#endif + +#define INVALID_VCPU_ID MAX_VIRT_CPUS + +#define __LINUX_ARM_ARCH__ 7 +#define CONFIG_AEABI + +/* Linkage for ARM */ +#ifdef __ASSEMBLY__ +#define ALIGN .align 2 +#define ENTRY(name) \ + .globl name; \ + ALIGN; \ + name: +#define GLOBAL(name) \ + .globl name; \ + name: +#define END(name) \ + .size name, .-name +#define ENDPROC(name) \ + .type name, %function; \ + END(name) +#endif + +#include +#include + +/* + * Common ARM32 and ARM64 layout: + * 0 - 2M Unmapped + * 2M - 4M Xen text, data, bss + * 4M - 6M Fixmap: special-purpose 4K mapping slots + * 6M - 10M Early boot mapping of FDT + * 10M - 12M Early relocation address (used when relocating Xen) + * and later for livepatch vmap (if compiled in) + * + * ARM32 layout: + * 0 - 12M + * + * 32M - 128M Frametable: 24 bytes per page for 16GB of RAM + * 256M - 1G VMAP: ioremap and early_ioremap use this virtual address + * space + * + * 1G - 2G Xenheap: always-mapped memory + * 2G - 4G Domheap: on-demand-mapped + * + * ARM64 layout: + * 0x0000000000000000 - 0x0000007fffffffff (512GB, L0 slot [0]) + * 0 - 12M + * + * 1G - 2G VMAP: ioremap and early_ioremap + * + * 32G - 64G Frametable: 24 bytes per page for 5.3TB of RAM + * + * 0x0000008000000000 - 0x00007fffffffffff (127.5TB, L0 slots [1..255]) + * Unused + * + * 0x0000800000000000 - 0x000084ffffffffff (5TB, L0 slots [256..265]) + * 1:1 mapping of RAM + * + * 0x0000850000000000 - 0x0000ffffffffffff (123TB, L0 slots [266..511]) + * Unused + */ + +#define XEN_VIRT_START _AT(vaddr_t,0x00200000) 
+#define FIXMAP_ADDR(n) (_AT(vaddr_t,0x00400000) + (n) * PAGE_SIZE) + +#define BOOT_FDT_VIRT_START _AT(vaddr_t,0x00600000) +#define BOOT_FDT_SLOT_SIZE MB(4) +#define BOOT_FDT_VIRT_END (BOOT_FDT_VIRT_START + BOOT_FDT_SLOT_SIZE) + +#define BOOT_RELOC_VIRT_START _AT(vaddr_t,0x00a00000) +#ifdef CONFIG_LIVEPATCH +#define LIVEPATCH_VMAP_START _AT(vaddr_t,0x00a00000) +#define LIVEPATCH_VMAP_END (LIVEPATCH_VMAP_START + MB(2)) +#endif + +#define HYPERVISOR_VIRT_START XEN_VIRT_START + +#ifdef CONFIG_ARM_32 + +#define CONFIG_DOMAIN_PAGE 1 +#define CONFIG_SEPARATE_XENHEAP 1 + +#define FRAMETABLE_VIRT_START _AT(vaddr_t,0x02000000) +#define FRAMETABLE_SIZE MB(128-32) +#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) +#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1) + +#define VMAP_VIRT_START _AT(vaddr_t,0x10000000) + +#define XENHEAP_VIRT_START _AT(vaddr_t,0x40000000) +#define XENHEAP_VIRT_END _AT(vaddr_t,0x7fffffff) +#define DOMHEAP_VIRT_START _AT(vaddr_t,0x80000000) +#define DOMHEAP_VIRT_END _AT(vaddr_t,0xffffffff) + +#define VMAP_VIRT_END XENHEAP_VIRT_START + +#define DOMHEAP_ENTRIES 1024 /* 1024 2MB mapping slots */ + +/* Number of domheap pagetable pages required at the second level (2MB mappings) */ +#define DOMHEAP_SECOND_PAGES ((DOMHEAP_VIRT_END - DOMHEAP_VIRT_START + 1) >> FIRST_SHIFT) + +#else /* ARM_64 */ + +#define SLOT0_ENTRY_BITS 39 +#define SLOT0(slot) (_AT(vaddr_t,slot) << SLOT0_ENTRY_BITS) +#define SLOT0_ENTRY_SIZE SLOT0(1) + +#define VMAP_VIRT_START GB(1) +#define VMAP_VIRT_END (VMAP_VIRT_START + GB(1)) + +#define FRAMETABLE_VIRT_START GB(32) +#define FRAMETABLE_SIZE GB(32) +#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) +#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1) + +#define DIRECTMAP_VIRT_START SLOT0(256) +#define DIRECTMAP_SIZE (SLOT0_ENTRY_SIZE * (265-256)) +#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1) + +#define XENHEAP_VIRT_START xenheap_virt_start + +#define HYPERVISOR_VIRT_END DIRECTMAP_VIRT_END + +#endif + +/* Fixmap slots */ +#define FIXMAP_CONSOLE 0 /* The primary UART */ +#define FIXMAP_MISC 1 /* Ephemeral mappings of hardware */ +#define FIXMAP_ACPI_BEGIN 2 /* Start mappings of ACPI tables */ +#define FIXMAP_ACPI_END (FIXMAP_ACPI_BEGIN + NUM_FIXMAP_ACPI_PAGES - 1) /* End mappings of ACPI tables */ + +#define NR_hypercalls 64 + +#define STACK_ORDER 3 +#define STACK_SIZE (PAGE_SIZE << STACK_ORDER) + +#ifndef __ASSEMBLY__ +extern unsigned long xen_phys_start; +extern unsigned long xenheap_phys_end; +extern unsigned long frametable_virt_end; +#endif + +#define watchdog_disable() ((void)0) +#define watchdog_enable() ((void)0) + +#if defined(__ASSEMBLY__) && !defined(__LINKER__) +#include +#include +#endif + +#endif /* __ARM_CONFIG_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/cpregs.h b/xen/arch/arm/include/asm/cpregs.h new file mode 100644 index 0000000000..6daf2b1a30 --- /dev/null +++ b/xen/arch/arm/include/asm/cpregs.h @@ -0,0 +1,375 @@ +#ifndef __ASM_ARM_CPREGS_H +#define __ASM_ARM_CPREGS_H + +/* + * AArch32 Co-processor registers. + * + * Note that AArch64 requires many of these definitions in order to + * support 32-bit guests. 
+ */ + +#define __HSR_CPREG_c0 0 +#define __HSR_CPREG_c1 1 +#define __HSR_CPREG_c2 2 +#define __HSR_CPREG_c3 3 +#define __HSR_CPREG_c4 4 +#define __HSR_CPREG_c5 5 +#define __HSR_CPREG_c6 6 +#define __HSR_CPREG_c7 7 +#define __HSR_CPREG_c8 8 +#define __HSR_CPREG_c9 9 +#define __HSR_CPREG_c10 10 +#define __HSR_CPREG_c11 11 +#define __HSR_CPREG_c12 12 +#define __HSR_CPREG_c13 13 +#define __HSR_CPREG_c14 14 +#define __HSR_CPREG_c15 15 + +#define __HSR_CPREG_0 0 +#define __HSR_CPREG_1 1 +#define __HSR_CPREG_2 2 +#define __HSR_CPREG_3 3 +#define __HSR_CPREG_4 4 +#define __HSR_CPREG_5 5 +#define __HSR_CPREG_6 6 +#define __HSR_CPREG_7 7 + +#define _HSR_CPREG32(cp,op1,crn,crm,op2) \ + ((__HSR_CPREG_##crn) << HSR_CP32_CRN_SHIFT) | \ + ((__HSR_CPREG_##crm) << HSR_CP32_CRM_SHIFT) | \ + ((__HSR_CPREG_##op1) << HSR_CP32_OP1_SHIFT) | \ + ((__HSR_CPREG_##op2) << HSR_CP32_OP2_SHIFT) + +#define _HSR_CPREG64(cp,op1,crm) \ + ((__HSR_CPREG_##crm) << HSR_CP64_CRM_SHIFT) | \ + ((__HSR_CPREG_##op1) << HSR_CP64_OP1_SHIFT) + +/* Encode a register as per HSR ISS pattern */ +#define HSR_CPREG32(X...) _HSR_CPREG32(X) +#define HSR_CPREG64(X...) _HSR_CPREG64(X) + +/* + * Order registers by Coprocessor-> CRn-> Opcode 1-> CRm-> Opcode 2 + * + * This matches the ordering used in the ARM as well as the groupings + * which the CP registers are allocated in. + * + * This is slightly different to the form of the instruction + * arguments, which are cp,opc1,crn,crm,opc2. + */ + +/* Coprocessor 10 */ + +#define FPSID p10,7,c0,c0,0 /* Floating-Point System ID Register */ +#define FPSCR p10,7,c1,c0,0 /* Floating-Point Status and Control Register */ +#define MVFR0 p10,7,c7,c0,0 /* Media and VFP Feature Register 0 */ +#define MVFR1 p10,7,c6,c0,0 /* Media and VFP Feature Register 1 */ +#define MVFR2 p10,7,c5,c0,0 /* Media and VFP Feature Register 2 */ +#define FPEXC p10,7,c8,c0,0 /* Floating-Point Exception Control Register */ +#define FPINST p10,7,c9,c0,0 /* Floating-Point Instruction Register */ +#define FPINST2 p10,7,c10,c0,0 /* Floating-point Instruction Register 2 */ + +/* Coprocessor 14 */ + +/* CP14 0: Debug Register interface */ +#define DBGDIDR p14,0,c0,c0,0 /* Debug ID Register */ +#define DBGDSCRINT p14,0,c0,c1,0 /* Debug Status and Control Internal */ +#define DBGDSCREXT p14,0,c0,c2,2 /* Debug Status and Control External */ +#define DBGVCR p14,0,c0,c7,0 /* Vector Catch */ +#define DBGBVR0 p14,0,c0,c0,4 /* Breakpoint Value 0 */ +#define DBGBCR0 p14,0,c0,c0,5 /* Breakpoint Control 0 */ +#define DBGWVR0 p14,0,c0,c0,6 /* Watchpoint Value 0 */ +#define DBGWCR0 p14,0,c0,c0,7 /* Watchpoint Control 0 */ +#define DBGBVR1 p14,0,c0,c1,4 /* Breakpoint Value 1 */ +#define DBGBCR1 p14,0,c0,c1,5 /* Breakpoint Control 1 */ +#define DBGOSLAR p14,0,c1,c0,4 /* OS Lock Access */ +#define DBGOSLSR p14,0,c1,c1,4 /* OS Lock Status Register */ +#define DBGOSDLR p14,0,c1,c3,4 /* OS Double Lock */ +#define DBGPRCR p14,0,c1,c4,4 /* Debug Power Control Register */ + +/* CP14 CR0: */ +#define TEECR p14,6,c0,c0,0 /* ThumbEE Configuration Register */ + +/* CP14 CR1: */ +#define DBGDRAR64 p14,0,c1 /* Debug ROM Address Register (64-bit access) */ +#define DBGDRAR p14,0,c1,c0,0 /* Debug ROM Address Register (32-bit access) */ +#define TEEHBR p14,6,c1,c0,0 /* ThumbEE Handler Base Register */ +#define JOSCR p14,7,c1,c0,0 /* Jazelle OS Control Register */ + +/* CP14 CR2: */ +#define DBGDSAR64 p14,0,c2 /* Debug Self Address Offset Register (64-bit access) */ +#define DBGDSAR p14,0,c2,c0,0 /* Debug Self Address Offset Register (32-bit access) */ +#define JMCR 
p14,7,c2,c0,0 /* Jazelle Main Configuration Register */ + + +/* Coprocessor 15 */ + +/* CP15 CR0: CPUID and Cache Type Registers */ +#define MIDR p15,0,c0,c0,0 /* Main ID Register */ +#define CTR p15,0,c0,c0,1 /* Cache Type Register */ +#define MPIDR p15,0,c0,c0,5 /* Multiprocessor Affinity Register */ +#define ID_PFR0 p15,0,c0,c1,0 /* Processor Feature Register 0 */ +#define ID_PFR1 p15,0,c0,c1,1 /* Processor Feature Register 1 */ +#define ID_PFR2 p15,0,c0,c3,4 /* Processor Feature Register 2 */ +#define ID_DFR0 p15,0,c0,c1,2 /* Debug Feature Register 0 */ +#define ID_DFR1 p15,0,c0,c3,5 /* Debug Feature Register 1 */ +#define ID_AFR0 p15,0,c0,c1,3 /* Auxiliary Feature Register 0 */ +#define ID_MMFR0 p15,0,c0,c1,4 /* Memory Model Feature Register 0 */ +#define ID_MMFR1 p15,0,c0,c1,5 /* Memory Model Feature Register 1 */ +#define ID_MMFR2 p15,0,c0,c1,6 /* Memory Model Feature Register 2 */ +#define ID_MMFR3 p15,0,c0,c1,7 /* Memory Model Feature Register 3 */ +#define ID_MMFR4 p15,0,c0,c2,6 /* Memory Model Feature Register 4 */ +#define ID_MMFR5 p15,0,c0,c3,6 /* Memory Model Feature Register 5 */ +#define ID_ISAR0 p15,0,c0,c2,0 /* ISA Feature Register 0 */ +#define ID_ISAR1 p15,0,c0,c2,1 /* ISA Feature Register 1 */ +#define ID_ISAR2 p15,0,c0,c2,2 /* ISA Feature Register 2 */ +#define ID_ISAR3 p15,0,c0,c2,3 /* ISA Feature Register 3 */ +#define ID_ISAR4 p15,0,c0,c2,4 /* ISA Feature Register 4 */ +#define ID_ISAR5 p15,0,c0,c2,5 /* ISA Feature Register 5 */ +#define ID_ISAR6 p15,0,c0,c2,7 /* ISA Feature Register 6 */ +#define CCSIDR p15,1,c0,c0,0 /* Cache Size ID Registers */ +#define CLIDR p15,1,c0,c0,1 /* Cache Level ID Register */ +#define CSSELR p15,2,c0,c0,0 /* Cache Size Selection Register */ +#define VPIDR p15,4,c0,c0,0 /* Virtualization Processor ID Register */ +#define VMPIDR p15,4,c0,c0,5 /* Virtualization Multiprocessor ID Register */ + +/* CP15 CR1: System Control Registers */ +#define SCTLR p15,0,c1,c0,0 /* System Control Register */ +#define ACTLR p15,0,c1,c0,1 /* Auxiliary Control Register */ +#define CPACR p15,0,c1,c0,2 /* Coprocessor Access Control Register */ +#define SCR p15,0,c1,c1,0 /* Secure Configuration Register */ +#define NSACR p15,0,c1,c1,2 /* Non-Secure Access Control Register */ +#define HSCTLR p15,4,c1,c0,0 /* Hyp. System Control Register */ +#define HCR p15,4,c1,c1,0 /* Hyp. Configuration Register */ +#define HDCR p15,4,c1,c1,1 /* Hyp. Debug Configuration Register */ +#define HCPTR p15,4,c1,c1,2 /* Hyp. Coprocessor Trap Register */ +#define HSTR p15,4,c1,c1,3 /* Hyp. System Trap Register */ + +/* CP15 CR2: Translation Table Base and Control Registers */ +#define TTBCR p15,0,c2,c0,2 /* Translation Table Base Control Register */ +#define TTBCR2 p15,0,c2,c0,3 /* Translation Table Base Control Register 2 */ +#define TTBR0 p15,0,c2 /* Translation Table Base Reg. 0 */ +#define TTBR1 p15,1,c2 /* Translation Table Base Reg. 1 */ +#define HTTBR p15,4,c2 /* Hyp. Translation Table Base Register */ +#define TTBR0_32 p15,0,c2,c0,0 /* 32-bit access to TTBR0 */ +#define TTBR1_32 p15,0,c2,c0,1 /* 32-bit access to TTBR1 */ +#define HTCR p15,4,c2,c0,2 /* Hyp. 
Translation Control Register */ +#define VTCR p15,4,c2,c1,2 /* Virtualization Translation Control Register */ +#define VTTBR p15,6,c2 /* Virtualization Translation Table Base Register */ + +/* CP15 CR3: Domain Access Control Register */ +#define DACR p15,0,c3,c0,0 /* Domain Access Control Register */ + +/* CP15 CR4: */ + +/* CP15 CR5: Fault Status Registers */ +#define DFSR p15,0,c5,c0,0 /* Data Fault Status Register */ +#define IFSR p15,0,c5,c0,1 /* Instruction Fault Status Register */ +#define ADFSR p15,0,c5,c1,0 /* Auxiliary Data Fault Status Register */ +#define AIFSR p15,0,c5,c1,1 /* Auxiliary Instruction Fault Status Register */ +#define HSR p15,4,c5,c2,0 /* Hyp. Syndrome Register */ + +/* CP15 CR6: Fault Address Registers */ +#define DFAR p15,0,c6,c0,0 /* Data Fault Address Register */ +#define IFAR p15,0,c6,c0,2 /* Instruction Fault Address Register */ +#define HDFAR p15,4,c6,c0,0 /* Hyp. Data Fault Address Register */ +#define HIFAR p15,4,c6,c0,2 /* Hyp. Instruction Fault Address Register */ +#define HPFAR p15,4,c6,c0,4 /* Hyp. IPA Fault Address Register */ + +/* CP15 CR7: Cache and address translation operations */ +#define PAR p15,0,c7 /* Physical Address Register */ + +#define ICIALLUIS p15,0,c7,c1,0 /* Invalidate all instruction caches to PoU inner shareable */ +#define BPIALLIS p15,0,c7,c1,6 /* Invalidate entire branch predictor array inner shareable */ +#define ICIALLU p15,0,c7,c5,0 /* Invalidate all instruction caches to PoU */ +#define ICIMVAU p15,0,c7,c5,1 /* Invalidate instruction caches by MVA to PoU */ +#define BPIALL p15,0,c7,c5,6 /* Invalidate entire branch predictor array */ +#define BPIMVA p15,0,c7,c5,7 /* Invalidate MVA from branch predictor array */ +#define DCIMVAC p15,0,c7,c6,1 /* Invalidate data cache line by MVA to PoC */ +#define DCISW p15,0,c7,c6,2 /* Invalidate data cache line by set/way */ +#define ATS1CPR p15,0,c7,c8,0 /* Address Translation Stage 1. Non-Secure Kernel Read */ +#define ATS1CPW p15,0,c7,c8,1 /* Address Translation Stage 1. Non-Secure Kernel Write */ +#define ATS1CUR p15,0,c7,c8,2 /* Address Translation Stage 1. Non-Secure User Read */ +#define ATS1CUW p15,0,c7,c8,3 /* Address Translation Stage 1. Non-Secure User Write */ +#define ATS12NSOPR p15,0,c7,c8,4 /* Address Translation Stage 1+2 Non-Secure Kernel Read */ +#define ATS12NSOPW p15,0,c7,c8,5 /* Address Translation Stage 1+2 Non-Secure Kernel Write */ +#define ATS12NSOUR p15,0,c7,c8,6 /* Address Translation Stage 1+2 Non-Secure User Read */ +#define ATS12NSOUW p15,0,c7,c8,7 /* Address Translation Stage 1+2 Non-Secure User Write */ +#define DCCMVAC p15,0,c7,c10,1 /* Clean data or unified cache line by MVA to PoC */ +#define DCCSW p15,0,c7,c10,2 /* Clean data cache line by set/way */ +#define DCCMVAU p15,0,c7,c11,1 /* Clean data cache line by MVA to PoU */ +#define DCCIMVAC p15,0,c7,c14,1 /* Data cache clean and invalidate by MVA */ +#define DCCISW p15,0,c7,c14,2 /* Clean and invalidate data cache line by set/way */ +#define ATS1HR p15,4,c7,c8,0 /* Address Translation Stage 1 Hyp. Read */ +#define ATS1HW p15,4,c7,c8,1 /* Address Translation Stage 1 Hyp. 
Write */ + +/* CP15 CR8: TLB maintenance operations */ +#define TLBIALLIS p15,0,c8,c3,0 /* Invalidate entire TLB innrer shareable */ +#define TLBIMVAIS p15,0,c8,c3,1 /* Invalidate unified TLB entry by MVA inner shareable */ +#define TLBIASIDIS p15,0,c8,c3,2 /* Invalidate unified TLB by ASID match inner shareable */ +#define TLBIMVAAIS p15,0,c8,c3,3 /* Invalidate unified TLB entry by MVA all ASID inner shareable */ +#define ITLBIALL p15,0,c8,c5,0 /* Invalidate instruction TLB */ +#define ITLBIMVA p15,0,c8,c5,1 /* Invalidate instruction TLB entry by MVA */ +#define ITLBIASID p15,0,c8,c5,2 /* Invalidate instruction TLB by ASID match */ +#define DTLBIALL p15,0,c8,c6,0 /* Invalidate data TLB */ +#define DTLBIMVA p15,0,c8,c6,1 /* Invalidate data TLB entry by MVA */ +#define DTLBIASID p15,0,c8,c6,2 /* Invalidate data TLB by ASID match */ +#define TLBIALL p15,0,c8,c7,0 /* invalidate unified TLB */ +#define TLBIMVA p15,0,c8,c7,1 /* invalidate unified TLB entry by MVA */ +#define TLBIASID p15,0,c8,c7,2 /* invalid unified TLB by ASID match */ +#define TLBIMVAA p15,0,c8,c7,3 /* invalidate unified TLB entries by MVA all ASID */ +#define TLBIALLHIS p15,4,c8,c3,0 /* Invalidate Entire Hyp. Unified TLB inner shareable */ +#define TLBIMVAHIS p15,4,c8,c3,1 /* Invalidate Unified Hyp. TLB by MVA inner shareable */ +#define TLBIALLNSNHIS p15,4,c8,c3,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB inner shareable */ +#define TLBIALLH p15,4,c8,c7,0 /* Invalidate Entire Hyp. Unified TLB */ +#define TLBIMVAH p15,4,c8,c7,1 /* Invalidate Unified Hyp. TLB by MVA */ +#define TLBIALLNSNH p15,4,c8,c7,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB */ + +/* CP15 CR9: Performance monitors */ +#define PMCR p15,0,c9,c12,0 /* Perf. Mon. Control Register */ +#define PMCNTENSET p15,0,c9,c12,1 /* Perf. Mon. Count Enable Set register */ +#define PMCNTENCLR p15,0,c9,c12,2 /* Perf. Mon. Count Enable Clear register */ +#define PMOVSR p15,0,c9,c12,3 /* Perf. Mon. Overflow Flag Status Register */ +#define PMSWINC p15,0,c9,c12,4 /* Perf. Mon. Software Increment register */ +#define PMSELR p15,0,c9,c12,5 /* Perf. Mon. Event Counter Selection Register */ +#define PMCEID0 p15,0,c9,c12,6 /* Perf. Mon. Common Event Identification register 0 */ +#define PMCEID1 p15,0,c9,c12,7 /* Perf. Mon. Common Event Identification register 1 */ +#define PMCCNTR p15,0,c9,c13,0 /* Perf. Mon. Cycle Count Register */ +#define PMXEVTYPER p15,0,c9,c13,1 /* Perf. Mon. Event Type Select Register */ +#define PMXEVCNTR p15,0,c9,c13,2 /* Perf. Mon. Event Count Register */ +#define PMUSERENR p15,0,c9,c14,0 /* Perf. Mon. User Enable Register */ +#define PMINTENSET p15,0,c9,c14,1 /* Perf. Mon. Interrupt Enable Set Register */ +#define PMINTENCLR p15,0,c9,c14,2 /* Perf. Mon. Interrupt Enable Clear Register */ +#define PMOVSSET p15,0,c9,c14,3 /* Perf. Mon. Overflow Flag Status Set register */ + +/* CP15 CR10: */ +#define MAIR0 p15,0,c10,c2,0 /* Memory Attribute Indirection Register 0 AKA PRRR */ +#define MAIR1 p15,0,c10,c2,1 /* Memory Attribute Indirection Register 1 AKA NMRR */ +#define HMAIR0 p15,4,c10,c2,0 /* Hyp. Memory Attribute Indirection Register 0 */ +#define HMAIR1 p15,4,c10,c2,1 /* Hyp. Memory Attribute Indirection Register 1 */ +#define AMAIR0 p15,0,c10,c3,0 /* Aux. Memory Attribute Indirection Register 0 */ +#define AMAIR1 p15,0,c10,c3,1 /* Aux. 
Memory Attribute Indirection Register 1 */ + +/* CP15 CR11: DMA Operations for TCM Access */ + +/* CP15 CR12: */ +#define ICC_SGI1R p15,0,c12 /* Interrupt Controller SGI Group 1 */ +#define ICC_ASGI1R p15,1,c12 /* Interrupt Controller Alias SGI Group 1 Register */ +#define ICC_SGI0R p15,2,c12 /* Interrupt Controller SGI Group 0 */ +#define VBAR p15,0,c12,c0,0 /* Vector Base Address Register */ +#define HVBAR p15,4,c12,c0,0 /* Hyp. Vector Base Address Register */ + +/* CP15 CR13: */ +#define FCSEIDR p15,0,c13,c0,0 /* FCSE Process ID Register */ +#define CONTEXTIDR p15,0,c13,c0,1 /* Context ID Register */ +#define TPIDRURW p15,0,c13,c0,2 /* Software Thread ID, User, R/W */ +#define TPIDRURO p15,0,c13,c0,3 /* Software Thread ID, User, R/O */ +#define TPIDRPRW p15,0,c13,c0,4 /* Software Thread ID, Priveleged */ +#define HTPIDR p15,4,c13,c0,2 /* HYp Software Thread Id Register */ + +/* CP15 CR14: */ +#define CNTPCT p15,0,c14 /* Time counter value */ +#define CNTFRQ p15,0,c14,c0,0 /* Time counter frequency */ +#define CNTKCTL p15,0,c14,c1,0 /* Time counter kernel control */ +#define CNTP_TVAL p15,0,c14,c2,0 /* Physical Timer value */ +#define CNTP_CTL p15,0,c14,c2,1 /* Physical Timer control register */ +#define CNTVCT p15,1,c14 /* Time counter value + offset */ +#define CNTP_CVAL p15,2,c14 /* Physical Timer comparator */ +#define CNTV_CVAL p15,3,c14 /* Virt. Timer comparator */ +#define CNTVOFF p15,4,c14 /* Time counter offset */ +#define CNTHCTL p15,4,c14,c1,0 /* Time counter hyp. control */ +#define CNTHP_TVAL p15,4,c14,c2,0 /* Hyp. Timer value */ +#define CNTHP_CTL p15,4,c14,c2,1 /* Hyp. Timer control register */ +#define CNTV_TVAL p15,0,c14,c3,0 /* Virt. Timer value */ +#define CNTV_CTL p15,0,c14,c3,1 /* Virt. TImer control register */ +#define CNTHP_CVAL p15,6,c14 /* Hyp. Timer comparator */ + +/* CP15 CR15: Implementation Defined Registers */ + +/* Aliases of AArch64 names for use in common code when building for AArch32 */ +#ifdef CONFIG_ARM_32 +/* Alphabetically... 
*/ +#define ACTLR_EL1 ACTLR +#define AFSR0_EL1 ADFSR +#define AFSR1_EL1 AIFSR +#define CCSIDR_EL1 CCSIDR +#define CLIDR_EL1 CLIDR +#define CNTFRQ_EL0 CNTFRQ +#define CNTHCTL_EL2 CNTHCTL +#define CNTHP_CTL_EL2 CNTHP_CTL +#define CNTHP_CVAL_EL2 CNTHP_CVAL +#define CNTKCTL_EL1 CNTKCTL +#define CNTPCT_EL0 CNTPCT +#define CNTP_CTL_EL0 CNTP_CTL +#define CNTP_CVAL_EL0 CNTP_CVAL +#define CNTVCT_EL0 CNTVCT +#define CNTVOFF_EL2 CNTVOFF +#define CNTV_CTL_EL0 CNTV_CTL +#define CNTV_CVAL_EL0 CNTV_CVAL +#define CONTEXTIDR_EL1 CONTEXTIDR +#define CPACR_EL1 CPACR +#define CPTR_EL2 HCPTR +#define CSSELR_EL1 CSSELR +#define CTR_EL0 CTR +#define DACR32_EL2 DACR +#define ESR_EL1 DFSR +#define ESR_EL2 HSR +#define HCR_EL2 HCR +#define HPFAR_EL2 HPFAR +#define HSTR_EL2 HSTR +#define ID_AFR0_EL1 ID_AFR0 +#define ID_DFR0_EL1 ID_DFR0 +#define ID_DFR1_EL1 ID_DFR1 +#define ID_ISAR0_EL1 ID_ISAR0 +#define ID_ISAR1_EL1 ID_ISAR1 +#define ID_ISAR2_EL1 ID_ISAR2 +#define ID_ISAR3_EL1 ID_ISAR3 +#define ID_ISAR4_EL1 ID_ISAR4 +#define ID_ISAR5_EL1 ID_ISAR5 +#define ID_ISAR6_EL1 ID_ISAR6 +#define ID_MMFR0_EL1 ID_MMFR0 +#define ID_MMFR1_EL1 ID_MMFR1 +#define ID_MMFR2_EL1 ID_MMFR2 +#define ID_MMFR3_EL1 ID_MMFR3 +#define ID_MMFR4_EL1 ID_MMFR4 +#define ID_MMFR5_EL1 ID_MMFR5 +#define ID_PFR0_EL1 ID_PFR0 +#define ID_PFR1_EL1 ID_PFR1 +#define ID_PFR2_EL1 ID_PFR2 +#define IFSR32_EL2 IFSR +#define MDCR_EL2 HDCR +#define MIDR_EL1 MIDR +#define MPIDR_EL1 MPIDR +#define PAR_EL1 PAR +#define SCTLR_EL1 SCTLR +#define SCTLR_EL2 HSCTLR +#define TCR_EL1 TTBCR +#define TEECR32_EL1 TEECR +#define TEEHBR32_EL1 TEEHBR +#define TPIDRRO_EL0 TPIDRURO +#define TPIDR_EL0 TPIDRURW +#define TPIDR_EL1 TPIDRPRW +#define TPIDR_EL2 HTPIDR +#define TTBR0_EL1 TTBR0 +#define TTBR0_EL2 HTTBR +#define TTBR1_EL1 TTBR1 +#define VBAR_EL1 VBAR +#define VBAR_EL2 HVBAR +#define VMPIDR_EL2 VMPIDR +#define VPIDR_EL2 VPIDR +#define VTCR_EL2 VTCR +#define VTTBR_EL2 VTTBR +#define MVFR0_EL1 MVFR0 +#define MVFR1_EL1 MVFR1 +#define MVFR2_EL1 MVFR2 +#endif + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/cpuerrata.h b/xen/arch/arm/include/asm/cpuerrata.h new file mode 100644 index 0000000000..8d7e7b9375 --- /dev/null +++ b/xen/arch/arm/include/asm/cpuerrata.h @@ -0,0 +1,85 @@ +#ifndef __ARM_CPUERRATA_H__ +#define __ARM_CPUERRATA_H__ + +#include +#include + +void check_local_cpu_errata(void); +void enable_errata_workarounds(void); + +#define CHECK_WORKAROUND_HELPER(erratum, feature, arch) \ +static inline bool check_workaround_##erratum(void) \ +{ \ + if ( !IS_ENABLED(arch) ) \ + return false; \ + else \ + { \ + register_t ret; \ + \ + asm volatile (ALTERNATIVE("mov %0, #0", \ + "mov %0, #1", \ + feature) \ + : "=r" (ret)); \ + \ + return unlikely(ret); \ + } \ +} + +CHECK_WORKAROUND_HELPER(766422, ARM32_WORKAROUND_766422, CONFIG_ARM_32) +CHECK_WORKAROUND_HELPER(834220, ARM64_WORKAROUND_834220, CONFIG_ARM_64) +CHECK_WORKAROUND_HELPER(ssbd, ARM_SSBD, CONFIG_ARM_SSBD) +CHECK_WORKAROUND_HELPER(858921, ARM_WORKAROUND_858921, + CONFIG_ARM_ERRATUM_858921) + +#undef CHECK_WORKAROUND_HELPER + +enum ssbd_state +{ + ARM_SSBD_UNKNOWN, + ARM_SSBD_FORCE_DISABLE, + ARM_SSBD_RUNTIME, + ARM_SSBD_FORCE_ENABLE, + ARM_SSBD_MITIGATED, +}; + +#ifdef CONFIG_ARM_SSBD + +#include + +extern enum ssbd_state ssbd_state; + +static inline enum ssbd_state get_ssbd_state(void) +{ + return ssbd_state; +} + +DECLARE_PER_CPU(register_t, ssbd_callback_required); + +static inline bool 
cpu_require_ssbd_mitigation(void) +{ + return this_cpu(ssbd_callback_required); +} + +#else + +static inline bool cpu_require_ssbd_mitigation(void) +{ + return false; +} + +static inline enum ssbd_state get_ssbd_state(void) +{ + return ARM_SSBD_UNKNOWN; +} + +#endif + +#endif /* __ARM_CPUERRATA_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/cpufeature.h b/xen/arch/arm/include/asm/cpufeature.h new file mode 100644 index 0000000000..8a5afbaf0b --- /dev/null +++ b/xen/arch/arm/include/asm/cpufeature.h @@ -0,0 +1,428 @@ +#ifndef __ASM_ARM_CPUFEATURE_H +#define __ASM_ARM_CPUFEATURE_H + +#ifdef CONFIG_ARM_64 +#define cpu_feature64(c, feat) ((c)->pfr64.feat) +#define boot_cpu_feature64(feat) (system_cpuinfo.pfr64.feat) +#define boot_dbg_feature64(feat) (system_cpuinfo.dbg64.feat) + +#define cpu_feature64_has_el0_32(c) (cpu_feature64(c, el0) == 2) + +#define cpu_has_el0_32 (boot_cpu_feature64(el0) == 2) +#define cpu_has_el0_64 (boot_cpu_feature64(el0) >= 1) +#define cpu_has_el1_32 (boot_cpu_feature64(el1) == 2) +#define cpu_has_el1_64 (boot_cpu_feature64(el1) >= 1) +#define cpu_has_el2_32 (boot_cpu_feature64(el2) == 2) +#define cpu_has_el2_64 (boot_cpu_feature64(el2) >= 1) +#define cpu_has_el3_32 (boot_cpu_feature64(el3) == 2) +#define cpu_has_el3_64 (boot_cpu_feature64(el3) >= 1) +#define cpu_has_fp (boot_cpu_feature64(fp) < 8) +#define cpu_has_simd (boot_cpu_feature64(simd) < 8) +#define cpu_has_gicv3 (boot_cpu_feature64(gic) >= 1) +#endif + +#define cpu_feature32(c, feat) ((c)->pfr32.feat) +#define boot_cpu_feature32(feat) (system_cpuinfo.pfr32.feat) +#define boot_dbg_feature32(feat) (system_cpuinfo.dbg32.feat) + +#define cpu_has_arm (boot_cpu_feature32(arm) == 1) +#define cpu_has_thumb (boot_cpu_feature32(thumb) >= 1) +#define cpu_has_thumb2 (boot_cpu_feature32(thumb) >= 3) +#define cpu_has_jazelle (boot_cpu_feature32(jazelle) > 0) +#define cpu_has_thumbee (boot_cpu_feature32(thumbee) == 1) +#define cpu_has_aarch32 (cpu_has_arm || cpu_has_thumb) + +#ifdef CONFIG_ARM_32 +#define cpu_has_gentimer (boot_cpu_feature32(gentimer) == 1) +/* + * On Armv7, the value 0 is used to indicate that PMUv2 is not + * supported. IOW this doesn't tell us whether the PMU is not supported + * (a processor may implement PMUv1). 
+ * + * For convenience, we treat 0 as not supported which matches the + * meaning on Armv8 + */ +#define cpu_has_pmu ((boot_dbg_feature32(perfmon) >= 1) && \ + (boot_dbg_feature32(perfmon) < 15)) +#else +#define cpu_has_gentimer (1) +#define cpu_has_pmu ((boot_dbg_feature64(pmu_ver) >= 1) && \ + (boot_dbg_feature64(pmu_ver) < 15)) +#endif +#define cpu_has_security (boot_cpu_feature32(security) > 0) + +#define ARM64_WORKAROUND_CLEAN_CACHE 0 +#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 +#define ARM32_WORKAROUND_766422 2 +#define ARM64_WORKAROUND_834220 3 +#define LIVEPATCH_FEATURE 4 +#define SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT 5 +#define ARM_HARDEN_BRANCH_PREDICTOR 6 +#define ARM_SSBD 7 +#define ARM_SMCCC_1_1 8 +#define ARM64_WORKAROUND_AT_SPECULATE 9 +#define ARM_WORKAROUND_858921 10 +#define ARM64_WORKAROUND_REPEAT_TLBI 11 + +#define ARM_NCAPS 12 + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +extern DECLARE_BITMAP(cpu_hwcaps, ARM_NCAPS); + +static inline bool cpus_have_cap(unsigned int num) +{ + if ( num >= ARM_NCAPS ) + return false; + + return test_bit(num, cpu_hwcaps); +} + +static inline int cpu_nr_siblings(unsigned int cpu) +{ + return 1; +} + +/* System capability check for constant cap */ +#define cpus_have_const_cap(num) ({ \ + register_t __ret; \ + \ + asm volatile (ALTERNATIVE("mov %0, #0", \ + "mov %0, #1", \ + num) \ + : "=r" (__ret)); \ + \ + unlikely(__ret); \ + }) + +static inline void cpus_set_cap(unsigned int num) +{ + if (num >= ARM_NCAPS) + printk(XENLOG_WARNING "Attempt to set an illegal CPU capability (%d >= %d)\n", + num, ARM_NCAPS); + else + __set_bit(num, cpu_hwcaps); +} + +struct arm_cpu_capabilities { + const char *desc; + u16 capability; + bool (*matches)(const struct arm_cpu_capabilities *); + int (*enable)(void *); /* Called on every active CPUs */ + union { + struct { /* To be used for eratum handling only */ + u32 midr_model; + u32 midr_range_min, midr_range_max; + }; + }; +}; + +void update_cpu_capabilities(const struct arm_cpu_capabilities *caps, + const char *info); + +void enable_cpu_capabilities(const struct arm_cpu_capabilities *caps); +int enable_nonboot_cpu_caps(const struct arm_cpu_capabilities *caps); + +/* + * capabilities of CPUs + */ +struct cpuinfo_arm { + union { + register_t bits; + struct { + unsigned long revision:4; + unsigned long part_number:12; + unsigned long architecture:4; + unsigned long variant:4; + unsigned long implementer:8; +#ifdef CONFIG_ARM_64 + unsigned long _res0:32; +#endif + }; + } midr; + union { + register_t bits; + struct { + unsigned long aff0:8; + unsigned long aff1:8; + unsigned long aff2:8; + unsigned long mt:1; /* Multi-thread, iff MP == 1 */ + unsigned long __res0:5; + unsigned long up:1; /* UP system, iff MP == 1 */ + unsigned long mp:1; /* MP extensions */ + +#ifdef CONFIG_ARM_64 + unsigned long aff3:8; + unsigned long __res1:24; +#endif + }; + } mpidr; + +#ifdef CONFIG_ARM_64 + /* 64-bit CPUID registers. 
*/ + union { + register_t bits[2]; + struct { + /* PFR0 */ + unsigned long el0:4; + unsigned long el1:4; + unsigned long el2:4; + unsigned long el3:4; + unsigned long fp:4; /* Floating Point */ + unsigned long simd:4; /* Advanced SIMD */ + unsigned long gic:4; /* GIC support */ + unsigned long ras:4; + unsigned long sve:4; + unsigned long sel2:4; + unsigned long mpam:4; + unsigned long amu:4; + unsigned long dit:4; + unsigned long __res0:4; + unsigned long csv2:4; + unsigned long cvs3:4; + + /* PFR1 */ + unsigned long bt:4; + unsigned long ssbs:4; + unsigned long mte:4; + unsigned long ras_frac:4; + unsigned long mpam_frac:4; + unsigned long __res1:44; + }; + } pfr64; + + union { + register_t bits[2]; + struct { + /* DFR0 */ + unsigned long debug_ver:4; + unsigned long trace_ver:4; + unsigned long pmu_ver:4; + unsigned long brps:4; + unsigned long __res0:4; + unsigned long wrps:4; + unsigned long __res1:4; + unsigned long ctx_cmps:4; + unsigned long pms_ver:4; + unsigned long double_lock:4; + unsigned long trace_filt:4; + unsigned long __res2:4; + unsigned long mtpmu:4; + unsigned long __res3:12; + + /* DFR1 */ + unsigned long __res4:64; + }; + } dbg64; + + struct { + register_t bits[2]; + } aux64; + + union { + register_t bits[3]; + struct { + unsigned long pa_range:4; + unsigned long asid_bits:4; + unsigned long bigend:4; + unsigned long secure_ns:4; + unsigned long bigend_el0:4; + unsigned long tgranule_16K:4; + unsigned long tgranule_64K:4; + unsigned long tgranule_4K:4; + unsigned long __res0:32; + + unsigned long hafdbs:4; + unsigned long vmid_bits:4; + unsigned long vh:4; + unsigned long hpds:4; + unsigned long lo:4; + unsigned long pan:4; + unsigned long __res1:8; + unsigned long __res2:32; + + unsigned long __res3:64; + }; + } mm64; + + union { + register_t bits[2]; + struct { + /* ISAR0 */ + unsigned long __res0:4; + unsigned long aes:4; + unsigned long sha1:4; + unsigned long sha2:4; + unsigned long crc32:4; + unsigned long atomic:4; + unsigned long __res1:4; + unsigned long rdm:4; + unsigned long sha3:4; + unsigned long sm3:4; + unsigned long sm4:4; + unsigned long dp:4; + unsigned long fhm:4; + unsigned long ts:4; + unsigned long tlb:4; + unsigned long rndr:4; + + /* ISAR1 */ + unsigned long dpb:4; + unsigned long apa:4; + unsigned long api:4; + unsigned long jscvt:4; + unsigned long fcma:4; + unsigned long lrcpc:4; + unsigned long gpa:4; + unsigned long gpi:4; + unsigned long frintts:4; + unsigned long sb:4; + unsigned long specres:4; + unsigned long bf16:4; + unsigned long dgh:4; + unsigned long i8mm:4; + unsigned long __res2:8; + }; + } isa64; + + struct { + register_t bits[1]; + } zfr64; + + /* + * DCZID is only used to check for incoherent values between cores + * and taint Xen in this case + */ + struct { + register_t bits[1]; + } dczid; + + /* + * CTR is only used to check for different cache types or policies and + * taint Xen in this case + */ + struct { + register_t bits[1]; + } ctr; + +#endif + + /* + * 32-bit CPUID registers. On ARMv8 these describe the properties + * when running in 32-bit mode. 
+ */ + union { + register_t bits[3]; + struct { + /* PFR0 */ + unsigned long arm:4; + unsigned long thumb:4; + unsigned long jazelle:4; + unsigned long thumbee:4; + unsigned long csv2:4; + unsigned long amu:4; + unsigned long dit:4; + unsigned long ras:4; +#ifdef CONFIG_ARM_64 + unsigned long __res0:32; +#endif + + /* PFR1 */ + unsigned long progmodel:4; + unsigned long security:4; + unsigned long mprofile:4; + unsigned long virt:4; + unsigned long gentimer:4; + unsigned long sec_frac:4; + unsigned long virt_frac:4; + unsigned long gic:4; +#ifdef CONFIG_ARM_64 + unsigned long __res1:32; +#endif + + /* PFR2 */ + unsigned long csv3:4; + unsigned long ssbs:4; + unsigned long ras_frac:4; + unsigned long __res2:20; +#ifdef CONFIG_ARM_64 + unsigned long __res3:32; +#endif + }; + } pfr32; + + union { + register_t bits[2]; + struct { + /* DFR0 */ + unsigned long copdbg:4; + unsigned long copsdbg:4; + unsigned long mmapdbg:4; + unsigned long coptrc:4; + unsigned long mmaptrc:4; + unsigned long mprofdbg:4; + unsigned long perfmon:4; + unsigned long tracefilt:4; +#ifdef CONFIG_ARM_64 + unsigned long __res0:32; +#endif + + /* DFR1 */ + unsigned long mtpmu:4; + unsigned long __res1:28; +#ifdef CONFIG_ARM_64 + unsigned long __res2:32; +#endif + }; + } dbg32; + + struct { + register_t bits[1]; + } aux32; + + struct { + register_t bits[6]; + } mm32; + + struct { + register_t bits[7]; + } isa32; + + struct { + register_t bits[3]; + } mvfr; +}; + +extern struct cpuinfo_arm system_cpuinfo; + +extern void identify_cpu(struct cpuinfo_arm *); + +#ifdef CONFIG_ARM_64 +extern void update_system_features(const struct cpuinfo_arm *); +#else +static inline void update_system_features(const struct cpuinfo_arm *cpuinfo) +{ + /* Not supported on arm32 */ +} +#endif + +extern struct cpuinfo_arm cpu_data[]; +#define current_cpu_data cpu_data[smp_processor_id()] + +extern struct cpuinfo_arm guest_cpuinfo; + +#endif /* __ASSEMBLY__ */ + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/current.h b/xen/arch/arm/include/asm/current.h new file mode 100644 index 0000000000..73e81458e5 --- /dev/null +++ b/xen/arch/arm/include/asm/current.h @@ -0,0 +1,73 @@ +#ifndef __ARM_CURRENT_H__ +#define __ARM_CURRENT_H__ + +#include +#include + +#include + +/* Tell whether the guest vCPU enabled Workaround 2 (i.e variant 4) */ +#define CPUINFO_WORKAROUND_2_FLAG_SHIFT 0 +#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT) + +#ifndef __ASSEMBLY__ + +struct vcpu; + +/* Which VCPU is "current" on this PCPU. 
*/ +DECLARE_PER_CPU(struct vcpu *, curr_vcpu); + +#define current (this_cpu(curr_vcpu)) +#define set_current(vcpu) do { current = (vcpu); } while (0) +#define get_cpu_current(cpu) (per_cpu(curr_vcpu, cpu)) + +/* Per-VCPU state that lives at the top of the stack */ +struct cpu_info { + struct cpu_user_regs guest_cpu_user_regs; + unsigned long elr; + uint32_t flags; +}; + +static inline struct cpu_info *get_cpu_info(void) +{ +#ifdef __clang__ + unsigned long sp; + + asm ("mov %0, sp" : "=r" (sp)); +#else + register unsigned long sp asm ("sp"); +#endif + + return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + + STACK_SIZE - sizeof(struct cpu_info)); +} + +#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs) + +#define switch_stack_and_jump(stack, fn) do { \ + asm volatile ("mov sp,%0; b " STR(fn) : : "r" (stack) : "memory" ); \ + unreachable(); \ +} while ( false ) + +#define reset_stack_and_jump(fn) switch_stack_and_jump(get_cpu_info(), fn) + +DECLARE_PER_CPU(unsigned int, cpu_id); + +#define get_processor_id() this_cpu(cpu_id) +#define set_processor_id(id) \ +do { \ + WRITE_SYSREG(__per_cpu_offset[(id)], TPIDR_EL2); \ + this_cpu(cpu_id) = (id); \ +} while ( 0 ) + +#endif + +#endif /* __ARM_CURRENT_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/debugger.h b/xen/arch/arm/include/asm/debugger.h new file mode 100644 index 0000000000..ac776efa78 --- /dev/null +++ b/xen/arch/arm/include/asm/debugger.h @@ -0,0 +1,15 @@ +#ifndef __ARM_DEBUGGER_H__ +#define __ARM_DEBUGGER_H__ + +#define debugger_trap_fatal(v, r) (0) +#define debugger_trap_immediate() ((void) 0) + +#endif /* __ARM_DEBUGGER_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/delay.h b/xen/arch/arm/include/asm/delay.h new file mode 100644 index 0000000000..042907d9d5 --- /dev/null +++ b/xen/arch/arm/include/asm/delay.h @@ -0,0 +1,14 @@ +#ifndef _ARM_DELAY_H +#define _ARM_DELAY_H + +extern void udelay(unsigned long usecs); + +#endif /* defined(_ARM_DELAY_H) */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/desc.h b/xen/arch/arm/include/asm/desc.h new file mode 100644 index 0000000000..a4d02d5eef --- /dev/null +++ b/xen/arch/arm/include/asm/desc.h @@ -0,0 +1,12 @@ +#ifndef __ARCH_DESC_H +#define __ARCH_DESC_H + +#endif /* __ARCH_DESC_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/device.h b/xen/arch/arm/include/asm/device.h new file mode 100644 index 0000000000..b5d451e087 --- /dev/null +++ b/xen/arch/arm/include/asm/device.h @@ -0,0 +1,124 @@ +#ifndef __ASM_ARM_DEVICE_H +#define __ASM_ARM_DEVICE_H + +enum device_type +{ + DEV_DT, + DEV_PCI, +}; + +struct dev_archdata { + void *iommu; /* IOMMU private data */ +}; + +/* struct device - The basic device structure */ +struct device +{ + enum device_type type; +#ifdef CONFIG_HAS_DEVICE_TREE + struct dt_device_node *of_node; /* Used by drivers imported from Linux */ +#endif + struct dev_archdata archdata; + struct iommu_fwspec *iommu_fwspec; /* per-device IOMMU instance data */ +}; + +typedef struct device device_t; + +#include + +#define dev_is_pci(dev) ((dev)->type == DEV_PCI) +#define dev_is_dt(dev) ((dev)->type 
== DEV_DT) + +enum device_class +{ + DEVICE_SERIAL, + DEVICE_IOMMU, + DEVICE_GIC, + DEVICE_PCI_HOSTBRIDGE, + /* Use for error */ + DEVICE_UNKNOWN, +}; + +struct device_desc { + /* Device name */ + const char *name; + /* Device class */ + enum device_class class; + /* List of devices supported by this driver */ + const struct dt_device_match *dt_match; + /* + * Device initialization. + * + * -EAGAIN is used to indicate that device probing is deferred. + */ + int (*init)(struct dt_device_node *dev, const void *data); +}; + +struct acpi_device_desc { + /* Device name */ + const char *name; + /* Device class */ + enum device_class class; + /* type of device supported by the driver */ + const int class_type; + /* Device initialization */ + int (*init)(const void *data); +}; + +/** + * acpi_device_init - Initialize a device + * @class: class of the device (serial, network...) + * @data: specific data for initializing the device + * + * Return 0 on success. + */ +int acpi_device_init(enum device_class class, + const void *data, int class_type); + +/** + * device_init - Initialize a device + * @dev: device to initialize + * @class: class of the device (serial, network...) + * @data: specific data for initializing the device + * + * Return 0 on success. + */ +int device_init(struct dt_device_node *dev, enum device_class class, + const void *data); + +/** + * device_get_type - Get the type of the device + * @dev: device to match + * + * Return the device type on success or DEVICE_ANY on failure + */ +enum device_class device_get_class(const struct dt_device_node *dev); + +#define DT_DEVICE_START(_name, _namestr, _class) \ +static const struct device_desc __dev_desc_##_name __used \ +__section(".dev.info") = { \ + .name = _namestr, \ + .class = _class, \ + +#define DT_DEVICE_END \ +}; + +#define ACPI_DEVICE_START(_name, _namestr, _class) \ +static const struct acpi_device_desc __dev_desc_##_name __used \ +__section(".adev.info") = { \ + .name = _namestr, \ + .class = _class, \ + +#define ACPI_DEVICE_END \ +}; + +#endif /* __ASM_ARM_DEVICE_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/div64.h b/xen/arch/arm/include/asm/div64.h new file mode 100644 index 0000000000..1cd58bc51a --- /dev/null +++ b/xen/arch/arm/include/asm/div64.h @@ -0,0 +1,250 @@ +/* Taken from Linux arch/arm */ +#ifndef __ASM_ARM_DIV64 +#define __ASM_ARM_DIV64 + +#include +#include + +/* + * The semantics of do_div() are: + * + * uint32_t do_div(uint64_t *n, uint32_t base) + * { + * uint32_t remainder = *n % base; + * *n = *n / base; + * return remainder; + * } + * + * In other words, a 64-bit dividend with a 32-bit divisor producing + * a 64-bit result and a 32-bit remainder. To accomplish this optimally + * we call a special __do_div64 helper with completely non standard + * calling convention for arguments and results (beware). 
+ */ + + +#if BITS_PER_LONG == 64 + +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) + +#elif BITS_PER_LONG == 32 + +#ifdef __ARMEB__ +#define __xh "r0" +#define __xl "r1" +#else +#define __xl "r0" +#define __xh "r1" +#endif + +#define __do_div_asm(n, base) \ +({ \ + register unsigned int __base asm("r4") = base; \ + register unsigned long long __n asm("r0") = n; \ + register unsigned long long __res asm("r2"); \ + register unsigned int __rem asm(__xh); \ + asm( __asmeq("%0", __xh) \ + __asmeq("%1", "r2") \ + __asmeq("%2", "r0") \ + __asmeq("%3", "r4") \ + "bl __do_div64" \ + : "=r" (__rem), "=r" (__res) \ + : "r" (__n), "r" (__base) \ + : "ip", "lr", "cc"); \ + n = __res; \ + __rem; \ +}) + +#if __GNUC__ < 4 + +/* + * gcc versions earlier than 4.0 are simply too problematic for the + * optimized implementation below. First there is gcc PR 15089 that + * tend to trig on more complex constructs, spurious .global __udivsi3 + * are inserted even if none of those symbols are referenced in the + * generated code, and those gcc versions are not able to do constant + * propagation on long long values anyway. + */ +#define do_div(n, base) __do_div_asm(n, base) + +#elif __GNUC__ >= 4 + +#include + +/* + * If the divisor happens to be constant, we determine the appropriate + * inverse at compile time to turn the division into a few inline + * multiplications instead which is much faster. And yet only if compiling + * for ARMv4 or higher (we need umull/umlal) and if the gcc version is + * sufficiently recent to perform proper long long constant propagation. + * (It is unfortunate that gcc doesn't perform all this internally.) + */ +#define do_div(n, base) \ +({ \ + unsigned int __r, __b = (base); \ + if (!__builtin_constant_p(__b) || __b == 0) { \ + /* non-constant divisor (or zero): slow path */ \ + __r = __do_div_asm(n, __b); \ + } else if ((__b & (__b - 1)) == 0) { \ + /* Trivial: __b is constant and a power of 2 */ \ + /* gcc does the right thing with this code. */ \ + __r = n; \ + __r &= (__b - 1); \ + n /= __b; \ + } else { \ + /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \ + /* We rely on the fact that most of this code gets */ \ + /* optimized away at compile time due to constant */ \ + /* propagation and only a couple inline assembly */ \ + /* instructions should remain. Better avoid any */ \ + /* code construct that might prevent that. */ \ + unsigned long long __res, __x, __t, __m, __n = n; \ + unsigned int __c, __p, __z = 0; \ + /* preserve low part of n for reminder computation */ \ + __r = __n; \ + /* determine number of bits to represent __b */ \ + __p = 1 << __div64_fls(__b); \ + /* compute __m = ((__p << 64) + __b - 1) / __b */ \ + __m = (~0ULL / __b) * __p; \ + __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \ + /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \ + __x = ~0ULL / __b * __b - 1; \ + __res = (__m & 0xffffffff) * (__x & 0xffffffff); \ + __res >>= 32; \ + __res += (__m & 0xffffffff) * (__x >> 32); \ + __t = __res; \ + __res += (__x & 0xffffffff) * (__m >> 32); \ + __t = (__res < __t) ? (1ULL << 32) : 0; \ + __res = (__res >> 32) + __t; \ + __res += (__m >> 32) * (__x >> 32); \ + __res /= __p; \ + /* Now sanitize and optimize what we've got. 
*/ \ + if (~0ULL % (__b / (__b & -__b)) == 0) { \ + /* those cases can be simplified with: */ \ + __n /= (__b & -__b); \ + __m = ~0ULL / (__b / (__b & -__b)); \ + __p = 1; \ + __c = 1; \ + } else if (__res != __x / __b) { \ + /* We can't get away without a correction */ \ + /* to compensate for bit truncation errors. */ \ + /* To avoid it we'd need an additional bit */ \ + /* to represent __m which would overflow it. */ \ + /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \ + __c = 1; \ + /* Compute __m = (__p << 64) / __b */ \ + __m = (~0ULL / __b) * __p; \ + __m += ((~0ULL % __b + 1) * __p) / __b; \ + } else { \ + /* Reduce __m/__p, and try to clear bit 31 */ \ + /* of __m when possible otherwise that'll */ \ + /* need extra overflow handling later. */ \ + unsigned int __bits = -(__m & -__m); \ + __bits |= __m >> 32; \ + __bits = (~__bits) << 1; \ + /* If __bits == 0 then setting bit 31 is */ \ + /* unavoidable. Simply apply the maximum */ \ + /* possible reduction in that case. */ \ + /* Otherwise the MSB of __bits indicates the */ \ + /* best reduction we should apply. */ \ + if (!__bits) { \ + __p /= (__m & -__m); \ + __m /= (__m & -__m); \ + } else { \ + __p >>= __div64_fls(__bits); \ + __m >>= __div64_fls(__bits); \ + } \ + /* No correction needed. */ \ + __c = 0; \ + } \ + /* Now we have a combination of 2 conditions: */ \ + /* 1) whether or not we need a correction (__c), and */ \ + /* 2) whether or not there might be an overflow in */ \ + /* the cross product (__m & ((1<<63) | (1<<31))) */ \ + /* Select the best insn combination to perform the */ \ + /* actual __m * __n / (__p << 64) operation. */ \ + if (!__c) { \ + asm ( "umull %Q0, %R0, %1, %Q2\n\t" \ + "mov %Q0, #0" \ + : "=&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ + __res = __m; \ + asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \ + "mov %Q0, #0" \ + : "+&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else { \ + asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \ + "cmn %Q0, %Q1\n\t" \ + "adcs %R0, %R0, %R1\n\t" \ + "adc %Q0, %3, #0" \ + : "=&r" (__res) \ + : "r" (__m), "r" (__n), "r" (__z) \ + : "cc" ); \ + } \ + if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ + asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \ + "umlal %R0, %Q0, %Q1, %R2\n\t" \ + "mov %R0, #0\n\t" \ + "umlal %Q0, %R0, %R1, %R2" \ + : "+&r" (__res) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } else { \ + asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \ + "umlal %R0, %1, %Q2, %R3\n\t" \ + "mov %R0, #0\n\t" \ + "adds %Q0, %1, %Q0\n\t" \ + "adc %R0, %R0, #0\n\t" \ + "umlal %Q0, %R0, %R2, %R3" \ + : "+&r" (__res), "+&r" (__z) \ + : "r" (__m), "r" (__n) \ + : "cc" ); \ + } \ + __res /= __p; \ + /* The reminder can be computed with 32-bit regs */ \ + /* only, and gcc is good at that. 
*/ \ + { \ + unsigned int __res0 = __res; \ + unsigned int __b0 = __b; \ + __r -= __res0 * __b0; \ + } \ + /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \ + n = __res; \ + } \ + __r; \ +}) + +/* our own fls implementation to make sure constant propagation is fine */ +#define __div64_fls(bits) \ +({ \ + unsigned int __left = (bits), __nr = 0; \ + if (__left & 0xffff0000) __nr += 16, __left >>= 16; \ + if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \ + if (__left & 0x000000f0) __nr += 4, __left >>= 4; \ + if (__left & 0x0000000c) __nr += 2, __left >>= 2; \ + if (__left & 0x00000002) __nr += 1; \ + __nr; \ +}) + +#endif /* GCC version */ + +#endif /* BITS_PER_LONG */ + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h new file mode 100644 index 0000000000..9b3647587a --- /dev/null +++ b/xen/arch/arm/include/asm/domain.h @@ -0,0 +1,279 @@ +#ifndef __ASM_DOMAIN_H__ +#define __ASM_DOMAIN_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct hvm_domain +{ + uint64_t params[HVM_NR_PARAMS]; +}; + +#ifdef CONFIG_ARM_64 +enum domain_type { + DOMAIN_32BIT, + DOMAIN_64BIT, +}; +#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT) +#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT) +#else +#define is_32bit_domain(d) (1) +#define is_64bit_domain(d) (0) +#endif + +/* The hardware domain has always its memory direct mapped. */ +#define is_domain_direct_mapped(d) is_hardware_domain(d) + +struct vtimer { + struct vcpu *v; + int irq; + struct timer timer; + register_t ctl; + uint64_t cval; +}; + +struct arch_domain +{ +#ifdef CONFIG_ARM_64 + enum domain_type type; +#endif + + /* Virtual MMU */ + struct p2m_domain p2m; + + struct hvm_domain hvm; + + struct vmmio vmmio; + + /* Continuable domain_relinquish_resources(). */ + unsigned int rel_priv; + + struct { + uint64_t offset; + } virt_timer_base; + + struct vgic_dist vgic; + + struct vuart { +#define VUART_BUF_SIZE 128 + char *buf; + int idx; + const struct vuart_info *info; + spinlock_t lock; + } vuart; + + unsigned int evtchn_irq; +#ifdef CONFIG_ACPI + void *efi_acpi_table; + paddr_t efi_acpi_gpa; + paddr_t efi_acpi_len; +#endif + + /* Monitor options */ + struct { + uint8_t privileged_call_enabled : 1; + } monitor; + +#ifdef CONFIG_SBSA_VUART_CONSOLE + struct vpl011 vpl011; +#endif + +#ifdef CONFIG_TEE + void *tee; +#endif +} __cacheline_aligned; + +struct arch_vcpu +{ + struct { +#ifdef CONFIG_ARM_32 + register_t r4; + register_t r5; + register_t r6; + register_t r7; + register_t r8; + register_t r9; + register_t sl; +#else + register_t x19; + register_t x20; + register_t x21; + register_t x22; + register_t x23; + register_t x24; + register_t x25; + register_t x26; + register_t x27; + register_t x28; +#endif + register_t fp; + register_t sp; + register_t pc; + } saved_context; + + void *stack; + + /* + * Points into ->stack, more convenient than doing pointer arith + * all the time. 
+ */
+    struct cpu_info *cpu_info;
+
+    /* Fault Status */
+#ifdef CONFIG_ARM_32
+    uint32_t dfsr;
+    uint32_t dfar, ifar;
+#else
+    uint64_t far;
+    uint32_t esr;
+#endif
+
+    uint32_t ifsr; /* 32-bit guests only */
+    uint32_t afsr0, afsr1;
+
+    /* MMU */
+    register_t vbar;
+    register_t ttbcr;
+    uint64_t ttbr0, ttbr1;
+
+    uint32_t dacr; /* 32-bit guests only */
+    uint64_t par;
+#ifdef CONFIG_ARM_32
+    uint32_t mair0, mair1;
+    uint32_t amair0, amair1;
+#else
+    uint64_t mair;
+    uint64_t amair;
+#endif
+
+    /* Control Registers */
+    register_t sctlr;
+    register_t actlr;
+    uint32_t cpacr;
+
+    uint32_t contextidr;
+    register_t tpidr_el0;
+    register_t tpidr_el1;
+    register_t tpidrro_el0;
+
+    /* HYP configuration */
+    register_t hcr_el2;
+    register_t mdcr_el2;
+
+    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
+#ifdef CONFIG_ARM_32
+    /*
+     * ARMv8 only supports a trivial implementation of Jazelle when in
+     * AArch32 mode and therefore has no extended control registers.
+     */
+    uint32_t joscr, jmcr;
+#endif
+
+    /* Floating-point */
+    struct vfp_state vfp;
+
+    /* CP 15 */
+    uint32_t csselr;
+    register_t vmpidr;
+
+    /* Holds GIC context data */
+    union gic_state_data gic;
+    uint64_t lr_mask;
+
+    struct vgic_cpu vgic;
+
+    /* Timer registers */
+    register_t cntkctl;
+
+    struct vtimer phys_timer;
+    struct vtimer virt_timer;
+    bool vtimer_initialized;
+
+    /*
+     * The full P2M may require some cleaning (e.g. when emulating
+     * set/way operations). As that can take a long time, it requires
+     * preemption. It is deferred until we return to the guest, where we
+     * can more easily check for softirqs and preempt the vCPU safely.
+     */
+    bool need_flush_to_ram;
+
+} __cacheline_aligned;
+
+void vcpu_show_execution_state(struct vcpu *);
+void vcpu_show_registers(const struct vcpu *);
+void vcpu_switch_to_aarch64_mode(struct vcpu *);
+
+/*
+ * Due to a restriction of GICv3, the number of vCPUs in AFF0 is
+ * limited to 16, so only the first 4 bits of AFF0 are legal. We will
+ * use the first 2 affinity levels here, expanding the number of vCPUs
+ * up to 4096 (== 16 * 256), which is more than the number of PEs that
+ * GIC-500 supports.
+ *
+ * Since we don't store the vCPU's topology (affinity) in vMPIDR at
+ * the moment, we map vcpuid to vMPIDR linearly.
+ */
+static inline unsigned int vaffinity_to_vcpuid(register_t vaff)
+{
+    unsigned int vcpuid;
+
+    vaff &= MPIDR_HWID_MASK;
+
+    vcpuid = MPIDR_AFFINITY_LEVEL(vaff, 0);
+    vcpuid |= MPIDR_AFFINITY_LEVEL(vaff, 1) << 4;
+
+    return vcpuid;
+}
+
+static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid)
+{
+    register_t vaff;
+
+    /*
+     * Right now only AFF0 and AFF1 are supported in virtual affinity.
+     * Since only the first 4 bits in AFF0 are used in GICv3, the
+     * available bits are 12 (4 + 8).
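+     *
+     * As a worked example: vcpuid 33 (0b100001) has AFF0 = 1 and
+     * AFF1 = 2, giving an affinity value of 0x201; this is exactly
+     * the mapping that vaffinity_to_vcpuid() above inverts.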
+ */ + BUILD_BUG_ON(!(MAX_VIRT_CPUS < ((1 << 12)))); + + vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0); + vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1); + + return vaff; +} + +static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void) +{ + return xmalloc(struct vcpu_guest_context); +} + +static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc) +{ + xfree(vgc); +} + +static inline void arch_vcpu_block(struct vcpu *v) {} + +#define arch_vm_assist_valid_mask(d) (1UL << VMASST_TYPE_runstate_update_flag) + +/* vPCI is not available on Arm */ +#define has_vpci(d) ({ (void)(d); false; }) + +#endif /* __ASM_DOMAIN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/domain_build.h b/xen/arch/arm/include/asm/domain_build.h new file mode 100644 index 0000000000..34ceddc995 --- /dev/null +++ b/xen/arch/arm/include/asm/domain_build.h @@ -0,0 +1,31 @@ +#ifndef __ASM_DOMAIN_BUILD_H__ +#define __ASM_DOMAIN_BUILD_H__ + +#include +#include + +int map_irq_to_domain(struct domain *d, unsigned int irq, + bool need_mapping, const char *devname); +int make_chosen_node(const struct kernel_info *kinfo); +void evtchn_allocate(struct domain *d); + +#ifndef CONFIG_ACPI +static inline int prepare_acpi(struct domain *d, struct kernel_info *kinfo) +{ + /* Only booting with ACPI will hit here */ + BUG(); + return -EINVAL; +} +#else +int prepare_acpi(struct domain *d, struct kernel_info *kinfo); +#endif +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/early_printk.h b/xen/arch/arm/include/asm/early_printk.h new file mode 100644 index 0000000000..8dc911cf48 --- /dev/null +++ b/xen/arch/arm/include/asm/early_printk.h @@ -0,0 +1,23 @@ +/* + * printk() for use before the final page tables are setup. + * + * Copyright (C) 2012 Citrix Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+#ifndef __ARM_EARLY_PRINTK_H__
+#define __ARM_EARLY_PRINTK_H__
+
+#include
+
+#ifdef CONFIG_EARLY_PRINTK
+
+/* Need to add the UART address offset within the page to the fixmap address. */
+#define EARLY_UART_VIRTUAL_ADDRESS \
+    (FIXMAP_ADDR(FIXMAP_CONSOLE) + (CONFIG_EARLY_UART_BASE_ADDRESS & ~PAGE_MASK))
+
+#endif /* CONFIG_EARLY_PRINTK */
+
+#endif
diff --git a/xen/arch/arm/include/asm/efibind.h b/xen/arch/arm/include/asm/efibind.h
new file mode 100644
index 0000000000..09dca7a8c9
--- /dev/null
+++ b/xen/arch/arm/include/asm/efibind.h
@@ -0,0 +1,2 @@
+#include
+#include
diff --git a/xen/arch/arm/include/asm/elf.h b/xen/arch/arm/include/asm/elf.h
new file mode 100644
index 0000000000..9e436e7556
--- /dev/null
+++ b/xen/arch/arm/include/asm/elf.h
@@ -0,0 +1,33 @@
+#ifndef __ARM_ELF_H__
+#define __ARM_ELF_H__
+
+typedef struct {
+    unsigned long r0;
+    unsigned long r1;
+    unsigned long r2;
+    unsigned long r3;
+    unsigned long r4;
+    unsigned long r5;
+    unsigned long r6;
+    unsigned long r7;
+    unsigned long r8;
+    unsigned long r9;
+    unsigned long r10;
+    unsigned long r11;
+    unsigned long r12;
+    unsigned long sp;
+    unsigned long lr;
+    unsigned long pc;
+} ELF_Gregset;
+
+#endif /* __ARM_ELF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/event.h b/xen/arch/arm/include/asm/event.h
new file mode 100644
index 0000000000..b14c166ad6
--- /dev/null
+++ b/xen/arch/arm/include/asm/event.h
@@ -0,0 +1,63 @@
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+#include
+
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
+void vcpu_update_evtchn_irq(struct vcpu *v);
+void vcpu_block_unless_event_pending(struct vcpu *v);
+
+static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
+{
+    struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;
+    return !(regs->cpsr & PSR_IRQ_MASK);
+}
+
+static inline int local_events_need_delivery_nomask(void)
+{
+    /* XXX: if the first interrupt has already been delivered, we should
+     * check whether any other interrupts with priority higher than the
+     * one in GICV_IAR are in the lr_pending queue or in the LR
+     * registers and return 1 only in that case.
+     * In practice the guest interrupt handler should run with
+     * interrupts disabled, so this shouldn't be a problem in the
+     * general case.
+     */
+    if ( vgic_vcpu_pending_irq(current) )
+        return 1;
+
+    if ( !vcpu_info(current, evtchn_upcall_pending) )
+        return 0;
+
+    return vgic_evtchn_irq_pending(current);
+}
+
+static inline int local_events_need_delivery(void)
+{
+    if ( !vcpu_event_delivery_is_enabled(current) )
+        return 0;
+    return local_events_need_delivery_nomask();
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    regs->cpsr &= ~PSR_IRQ_MASK;
+}
+
+/* No arch-specific virq definitions for now; default to global.
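+ * Returning false for a virq would instead mark it as per-vCPU, i.e.
+ * each vCPU would get its own instance of that virq.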
*/ +static inline bool arch_virq_is_global(unsigned int virq) +{ + return true; +} + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/exynos4210-uart.h b/xen/arch/arm/include/asm/exynos4210-uart.h new file mode 100644 index 0000000000..e2ab4a43e4 --- /dev/null +++ b/xen/arch/arm/include/asm/exynos4210-uart.h @@ -0,0 +1,112 @@ +/* + * xen/include/asm-arm/exynos4210-uart.h + * + * Common constant definition between early printk and the UART driver + * for the exynos 4210 UART + * + * Julien Grall + * Copyright (c) 2013 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_EXYNOS4210_H +#define __ASM_ARM_EXYNOS4210_H + + +/* + * this value is only valid for UART 2 and UART 3 + * XXX: define per UART + */ +#define FIFO_MAX_SIZE 16 + +/* register addresses */ +#define ULCON (0x00) +#define UCON (0x04) +#define UFCON (0x08) +#define UMCON (0x0c) +#define UTRSTAT (0x10) +#define UERSTAT (0x14) +#define UFSTAT (0x18) +#define UMSTAT (0x1c) +#define UTXH (0x20) +#define URXH (0x24) +#define UBRDIV (0x28) +#define UFRACVAL (0x2c) +#define UINTP (0x30) +#define UINTS (0x34) +#define UINTM (0x38) + +/* UCON */ +#define UCON_RX_IRQ (1 << 0) +#define UCON_TX_IRQ (1 << 2) +#define UCON_RX_TIMEOUT (1 << 7) + +/* + * FIXME: IRQ_LEVEL should be 1 << n but with this value, the IRQ + * handler will never end... 
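+ * A plausible explanation: in level mode the interrupt stays asserted
+ * for as long as the FIFO trigger condition holds, so the handler is
+ * re-entered forever; pulse mode (0) avoids that.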
+ */
+#define UCON_RX_IRQ_LEVEL   (0 << 8)
+#define UCON_TX_IRQ_LEVEL   (0 << 9)
+
+/* ULCON */
+#define ULCON_STOPB_SHIFT   2
+#define ULCON_PARITY_SHIFT  3
+
+/* UFCON */
+#define UFCON_FIFO_TX_RESET     (1 << 2)
+#define UFCON_FIFO_RX_RESET     (1 << 1)
+#define UFCON_FIFO_RESET        (UFCON_FIFO_TX_RESET | UFCON_FIFO_RX_RESET)
+#define UFCON_FIFO_EN           (1 << 0)
+
+#define UFCON_FIFO_TX_TRIGGER   (0x6 << 8)
+
+/* UMCON */
+#define UMCON_INT_EN            (1 << 3)
+
+/* UERSTAT */
+#define UERSTAT_OVERRUN         (1 << 0)
+#define UERSTAT_PARITY          (1 << 1)
+#define UERSTAT_FRAME           (1 << 2)
+#define UERSTAT_BREAK           (1 << 3)
+
+/* UFSTAT */
+#define UFSTAT_TX_FULL          (1 << 24)
+#define UFSTAT_TX_COUNT_SHIFT   (16)
+#define UFSTAT_TX_COUNT_MASK    (0xff << UFSTAT_TX_COUNT_SHIFT)
+#define UFSTAT_RX_FULL          (1 << 8)
+#define UFSTAT_RX_COUNT_SHIFT   (0)
+#define UFSTAT_RX_COUNT_MASK    (0xff << UFSTAT_RX_COUNT_SHIFT)
+
+/* UTRSTAT */
+#define UTRSTAT_TXFE            (1 << 1)
+#define UTRSTAT_TXE             (1 << 2)
+
+/* URXH */
+#define URXH_DATA_MASK          (0xff)
+
+/* Interrupt bits (UINTP, UINTS, UINTM) */
+#define UINTM_MODEM             (1 << 3)
+#define UINTM_TXD               (1 << 2)
+#define UINTM_ERROR             (1 << 1)
+#define UINTM_RXD               (1 << 0)
+#define UINTM_ALLI              (UINTM_MODEM | UINTM_TXD | UINTM_ERROR | UINTM_RXD)
+
+#endif /* __ASM_ARM_EXYNOS4210_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/flushtlb.h b/xen/arch/arm/include/asm/flushtlb.h
new file mode 100644
index 0000000000..125a141975
--- /dev/null
+++ b/xen/arch/arm/include/asm/flushtlb.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_ARM_FLUSHTLB_H__
+#define __ASM_ARM_FLUSHTLB_H__
+
+#include
+
+/*
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
+ */
+/* XXX lazy implementation just doesn't clear anything.... */
+static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) {}
+
+#define tlbflush_current_time() (0)
+
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    page->tlbflush_timestamp = tlbflush_current_time();
+}
+
+#if defined(CONFIG_ARM_32)
+# include
+#elif defined(CONFIG_ARM_64)
+# include
+#else
+# error "unknown ARM variant"
+#endif
+
+/* Flush specified CPUs' TLBs */
+void arch_flush_tlb_mask(const cpumask_t *mask);
+
+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
+ */
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+                                                unsigned long size)
+{
+    vaddr_t end = va + size;
+
+    dsb(sy); /* Ensure preceding accesses are visible */
+    while ( va < end )
+    {
+        __flush_xen_tlb_one_local(va);
+        va += PAGE_SIZE;
+    }
+    dsb(sy); /* Ensure completion of the TLB flush */
+    isb();
+}
+
+/*
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
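+ * This mirrors the _local variant above, except that
+ * __flush_xen_tlb_one() uses the inner-shareable TLBI encoding, so
+ * every processor in the inner-shareable domain observes the
+ * invalidation.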
+ */ +static inline void flush_xen_tlb_range_va(vaddr_t va, + unsigned long size) +{ + vaddr_t end = va + size; + + dsb(sy); /* Ensure preceding are visible */ + while ( va < end ) + { + __flush_xen_tlb_one(va); + va += PAGE_SIZE; + } + dsb(sy); /* Ensure completion of the TLB flush */ + isb(); +} + +#endif /* __ASM_ARM_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/gic.h b/xen/arch/arm/include/asm/gic.h new file mode 100644 index 0000000000..c7f0c343d1 --- /dev/null +++ b/xen/arch/arm/include/asm/gic.h @@ -0,0 +1,459 @@ +/* + * ARM Generic Interrupt Controller support + * + * Tim Deegan + * Copyright (c) 2011 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_GIC_H__ +#define __ASM_ARM_GIC_H__ + +#define NR_GIC_LOCAL_IRQS NR_LOCAL_IRQS +#define NR_GIC_SGI 16 + +#define GICD_CTLR (0x000) +#define GICD_TYPER (0x004) +#define GICD_IIDR (0x008) +#define GICD_IGROUPR (0x080) +#define GICD_IGROUPRN (0x0FC) +#define GICD_ISENABLER (0x100) +#define GICD_ISENABLERN (0x17C) +#define GICD_ICENABLER (0x180) +#define GICD_ICENABLERN (0x1fC) +#define GICD_ISPENDR (0x200) +#define GICD_ISPENDRN (0x27C) +#define GICD_ICPENDR (0x280) +#define GICD_ICPENDRN (0x2FC) +#define GICD_ISACTIVER (0x300) +#define GICD_ISACTIVERN (0x37C) +#define GICD_ICACTIVER (0x380) +#define GICD_ICACTIVERN (0x3FC) +#define GICD_IPRIORITYR (0x400) +#define GICD_IPRIORITYRN (0x7F8) +#define GICD_ITARGETSR (0x800) +#define GICD_ITARGETSR7 (0x81C) +#define GICD_ITARGETSR8 (0x820) +#define GICD_ITARGETSRN (0xBF8) +#define GICD_ICFGR (0xC00) +#define GICD_ICFGR1 (0xC04) +#define GICD_ICFGR2 (0xC08) +#define GICD_ICFGRN (0xCFC) +#define GICD_NSACR (0xE00) +#define GICD_NSACRN (0xEFC) +#define GICD_SGIR (0xF00) +#define GICD_CPENDSGIR (0xF10) +#define GICD_CPENDSGIRN (0xF1C) +#define GICD_SPENDSGIR (0xF20) +#define GICD_SPENDSGIRN (0xF2C) +#define GICD_ICPIDR2 (0xFE8) + +#define GICD_SGI_TARGET_LIST_SHIFT (24) +#define GICD_SGI_TARGET_LIST_MASK (0x3UL << GICD_SGI_TARGET_LIST_SHIFT) +#define GICD_SGI_TARGET_LIST (0UL<> 3) /* GICH_LR and GICH_VMCR only support + 5 bits for guest irq priority */ + +#define GICH_LR_PENDING 1 +#define GICH_LR_ACTIVE 2 + +#ifndef __ASSEMBLY__ +#include +#include + +#define DT_COMPAT_GIC_CORTEX_A15 "arm,cortex-a15-gic" + +#define DT_MATCH_GIC_V2 \ + DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_CORTEX_A15), \ + DT_MATCH_COMPATIBLE("arm,cortex-a7-gic"), \ + DT_MATCH_COMPATIBLE("arm,gic-400") + +#define DT_MATCH_GIC_V3 DT_MATCH_COMPATIBLE("arm,gic-v3") + +#ifdef CONFIG_GICV3 +/* + * GICv3 registers that needs to be saved/restored + */ +struct gic_v3 { + register_t vmcr, sre_el1; + register_t apr0[4]; + register_t apr1[4]; + uint64_t lr[16]; +}; +#endif + +/* + * GICv2 register that needs to be saved/restored + * on VCPU context switch + */ +struct gic_v2 { + uint32_t hcr; + uint32_t vmcr; + uint32_t apr; + uint32_t lr[64]; +}; + +/* + * Union to hold underlying hw version context information + */ 
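+/* Only the member matching gic_hw_version() is ever used. */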
+union gic_state_data {
+    struct gic_v2 v2;
+#ifdef CONFIG_GICV3
+    struct gic_v3 v3;
+#endif
+};
+
+/*
+ * Decode LR register content.
+ * The LR register format differs between GIC HW versions.
+ */
+struct gic_lr {
+    /* Virtual IRQ */
+    uint32_t virq;
+    uint8_t priority;
+    bool active;
+    bool pending;
+    bool hw_status;
+    union
+    {
+        /* Only filled when there is a corresponding pIRQ (hw_status = true) */
+        struct
+        {
+            uint32_t pirq;
+        } hw;
+        /* Only filled when there is no corresponding pIRQ (hw_status = false) */
+        struct
+        {
+            bool eoi;
+            uint8_t source; /* GICv2 only */
+        } virt;
+    };
+};
+
+enum gic_version {
+    GIC_INVALID = 0, /* the default until explicitly set up */
+    GIC_V2,
+    GIC_V3,
+};
+
+DECLARE_PER_CPU(uint64_t, lr_mask);
+
+extern enum gic_version gic_hw_version(void);
+
+/* Program the IRQ type into the GIC */
+void gic_set_irq_type(struct irq_desc *desc, unsigned int type);
+
+/* Program the GIC to route an interrupt */
+extern void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority);
+extern int gic_route_irq_to_guest(struct domain *, unsigned int virq,
+                                  struct irq_desc *desc,
+                                  unsigned int priority);
+
+/* Remove an IRQ passthrough to a guest */
+int gic_remove_irq_from_guest(struct domain *d, unsigned int virq,
+                              struct irq_desc *desc);
+
+extern void gic_clear_pending_irqs(struct vcpu *v);
+
+extern void init_maintenance_interrupt(void);
+extern void gic_raise_guest_irq(struct vcpu *v, unsigned int irq,
+                                unsigned int priority);
+extern void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq);
+
+/* Accept an interrupt from the GIC and dispatch its handler */
+extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq);
+/* Find the interrupt controller and set up the callback to translate
+ * device tree IRQs.
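+ * This runs during early boot, before gic_init() below brings the
+ * controller up.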
+ */ +extern void gic_preinit(void); +/* Bring up the interrupt controller, and report # cpus attached */ +extern void gic_init(void); +/* Bring up a secondary CPU's per-CPU GIC interface */ +extern void gic_init_secondary_cpu(void); +/* Take down a CPU's per-CPU GIC interface */ +extern void gic_disable_cpu(void); +/* setup the gic virtual interface for a guest */ +extern int gicv_setup(struct domain *d); + +/* Context switch */ +extern void gic_save_state(struct vcpu *v); +extern void gic_restore_state(struct vcpu *v); + +/* SGI (AKA IPIs) */ +enum gic_sgi { + GIC_SGI_EVENT_CHECK, + GIC_SGI_DUMP_STATE, + GIC_SGI_CALL_FUNCTION, + GIC_SGI_MAX, +}; + +/* SGI irq mode types */ +enum gic_sgi_mode { + SGI_TARGET_LIST, + SGI_TARGET_OTHERS, + SGI_TARGET_SELF, +}; + +extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); +extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); +extern void send_SGI_self(enum gic_sgi sgi); +extern void send_SGI_allbutself(enum gic_sgi sgi); + +/* print useful debug info */ +extern void gic_dump_info(struct vcpu *v); +extern void gic_dump_vgic_info(struct vcpu *v); + +/* Number of interrupt lines */ +extern unsigned int gic_number_lines(void); + +/* IRQ translation function for the device tree */ +int gic_irq_xlate(const u32 *intspec, unsigned int intsize, + unsigned int *out_hwirq, unsigned int *out_type); + +struct gic_info { + /* GIC version */ + enum gic_version hw_version; + /* Number of GIC lines supported */ + unsigned int nr_lines; + /* Number of LR registers */ + uint8_t nr_lrs; + /* Maintenance irq number */ + unsigned int maintenance_irq; + /* Pointer to the device tree node representing the interrupt controller */ + const struct dt_device_node *node; +}; + +struct gic_hw_operations { + /* Hold GIC HW information */ + const struct gic_info *info; + /* Initialize the GIC and the boot CPU */ + int (*init)(void); + /* Save GIC registers */ + void (*save_state)(struct vcpu *); + /* Restore GIC registers */ + void (*restore_state)(const struct vcpu *); + /* Dump GIC LR register information */ + void (*dump_state)(const struct vcpu *); + + /* hw_irq_controller to enable/disable/eoi host irq */ + hw_irq_controller *gic_host_irq_type; + + /* hw_irq_controller to enable/disable/eoi guest irq */ + hw_irq_controller *gic_guest_irq_type; + + /* End of Interrupt */ + void (*eoi_irq)(struct irq_desc *irqd); + /* Deactivate/reduce priority of irq */ + void (*deactivate_irq)(struct irq_desc *irqd); + /* Read IRQ id and Ack */ + unsigned int (*read_irq)(void); + /* Force the active state of an IRQ by accessing the distributor */ + void (*set_active_state)(struct irq_desc *irqd, bool state); + /* Force the pending state of an IRQ by accessing the distributor */ + void (*set_pending_state)(struct irq_desc *irqd, bool state); + /* Set IRQ type */ + void (*set_irq_type)(struct irq_desc *desc, unsigned int type); + /* Set IRQ priority */ + void (*set_irq_priority)(struct irq_desc *desc, unsigned int priority); + /* Send SGI */ + void (*send_SGI)(enum gic_sgi sgi, enum gic_sgi_mode irqmode, + const cpumask_t *online_mask); + /* Disable CPU physical and virtual interfaces */ + void (*disable_interface)(void); + /* Update LR register with state and priority */ + void (*update_lr)(int lr, unsigned int virq, uint8_t priority, + unsigned int hw_irq, unsigned int state); + /* Update HCR status register */ + void (*update_hcr_status)(uint32_t flag, bool set); + /* Clear LR register */ + void (*clear_lr)(int lr); + /* Read LR register and populate gic_lr 
structure */
+    void (*read_lr)(int lr, struct gic_lr *);
+    /* Write LR register from gic_lr structure */
+    void (*write_lr)(int lr, const struct gic_lr *);
+    /* Read VMCR priority */
+    unsigned int (*read_vmcr_priority)(void);
+    /* Read APRn register */
+    unsigned int (*read_apr)(int apr_reg);
+    /* Query the pending state of an interrupt at the distributor level. */
+    bool (*read_pending_state)(struct irq_desc *irqd);
+    /* Secondary CPU init */
+    int (*secondary_init)(void);
+    /* Create GIC node for the hardware domain */
+    int (*make_hwdom_dt_node)(const struct domain *d,
+                              const struct dt_device_node *gic, void *fdt);
+#ifdef CONFIG_ACPI
+    /* Create MADT table for the hardware domain */
+    int (*make_hwdom_madt)(const struct domain *d, u32 offset);
+    /* Query the size of the hardware domain's MADT table */
+    unsigned long (*get_hwdom_extra_madt_size)(const struct domain *d);
+#endif
+    /* Map extra GIC MMIO, irqs and other hw stuff to the hardware domain. */
+    int (*map_hwdom_extra_mappings)(struct domain *d);
+    /* Deny access to GIC regions */
+    int (*iomem_deny_access)(const struct domain *d);
+    /* Handle LPIs, which need special treatment */
+    void (*do_LPI)(unsigned int lpi);
+};
+
+extern const struct gic_hw_operations *gic_hw_ops;
+
+static inline unsigned int gic_get_nr_lrs(void)
+{
+    return gic_hw_ops->info->nr_lrs;
+}
+
+/*
+ * Set the active state of an IRQ. This should be used with care, as this
+ * directly forces the active bit, without considering the GIC state machine.
+ * For private IRQs this only works for those of the current CPU.
+ *
+ * This function should only be called for interrupts routed to the
+ * guest. The flow of interrupts routed to Xen is not able to cope with
+ * software changes to the active state.
+ */
+static inline void gic_set_active_state(struct irq_desc *irqd, bool state)
+{
+    ASSERT(test_bit(_IRQ_GUEST, &irqd->status));
+    gic_hw_ops->set_active_state(irqd, state);
+}
+
+/*
+ * Set the pending state of an IRQ. This should be used with care, as this
+ * directly forces the pending bit, without considering the GIC state machine.
+ * For private IRQs this only works for those of the current CPU.
+ */
+static inline void gic_set_pending_state(struct irq_desc *irqd, bool state)
+{
+    gic_hw_ops->set_pending_state(irqd, state);
+}
+
+/*
+ * Read the pending state of an interrupt from the distributor.
+ * For private IRQs this only works for those of the current CPU.
+ */
+static inline bool gic_read_pending_state(struct irq_desc *irqd)
+{
+    return gic_hw_ops->read_pending_state(irqd);
+}
+
+void register_gic_ops(const struct gic_hw_operations *ops);
+int gic_make_hwdom_dt_node(const struct domain *d,
+                           const struct dt_device_node *gic,
+                           void *fdt);
+
+#ifdef CONFIG_ACPI
+int gic_make_hwdom_madt(const struct domain *d, u32 offset);
+unsigned long gic_get_hwdom_madt_size(const struct domain *d);
+#endif
+
+int gic_map_hwdom_extra_mappings(struct domain *d);
+int gic_iomem_deny_access(const struct domain *d);
+
+#endif /* __ASSEMBLY__ */
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/gic_v3_defs.h b/xen/arch/arm/include/asm/gic_v3_defs.h
new file mode 100644
index 0000000000..34ed5f857d
--- /dev/null
+++ b/xen/arch/arm/include/asm/gic_v3_defs.h
@@ -0,0 +1,220 @@
+/*
+ * ARM Generic Interrupt Controller v3 definitions
+ *
+ * Vijaya Kumar K
+ * Copyright (c) 2014 Cavium Inc.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_GIC_V3_DEFS_H__ +#define __ASM_ARM_GIC_V3_DEFS_H__ + +#include + +/* + * Additional registers defined in GIC v3. + * Common GICD registers are defined in gic.h + */ + +#define GICD_STATUSR (0x010) +#define GICD_SETSPI_NSR (0x040) +#define GICD_CLRSPI_NSR (0x048) +#define GICD_SETSPI_SR (0x050) +#define GICD_CLRSPI_SR (0x058) +#define GICD_IROUTER (0x6000) +#define GICD_IROUTER32 (0x6100) +#define GICD_IROUTER1019 (0x7FD8) +#define GICD_PIDR2 (0xFFE8) + +/* Common between GICD_PIDR2 and GICR_PIDR2 */ +#define GIC_PIDR2_ARCH_MASK (0xf0) +#define GIC_PIDR2_ARCH_GICv3 (0x30) +#define GIC_PIDR2_ARCH_GICv4 (0x40) + +#define GICC_SRE_EL2_SRE (1UL << 0) +#define GICC_SRE_EL2_DFB (1UL << 1) +#define GICC_SRE_EL2_DIB (1UL << 2) +#define GICC_SRE_EL2_ENEL1 (1UL << 3) + +#define GICC_IAR_INTID_MASK (0xFFFFFF) + +/* Additional bits in GICD_TYPER defined by GICv3 */ +#define GICD_TYPE_ID_BITS_SHIFT 19 +#define GICD_TYPE_ID_BITS(r) ((((r) >> GICD_TYPE_ID_BITS_SHIFT) & 0x1f) + 1) + +#define GICD_TYPE_LPIS (1U << 17) + +#define GICD_CTLR_RWP (1UL << 31) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) +#define GICD_IROUTER_SPI_MODE_ANY (1UL << 31) + +#define GICC_CTLR_EL1_EOImode_drop (1U << 1) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GICR_SYNCR_NOT_BUSY 1 +/* + * Implementation defined value JEP106? + * use physical hw value for now + */ +#define GICV3_GICD_IIDR_VAL 0x34c +#define GICV3_GICR_IIDR_VAL GICV3_GICD_IIDR_VAL + +/* Two pages for the RD_base and SGI_base register frame. 
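+ * Each frame is 64KB in size, hence the 2 * SZ_64K below.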
*/ +#define GICV3_GICR_SIZE (2 * SZ_64K) + +#define GICR_CTLR (0x0000) +#define GICR_IIDR (0x0004) +#define GICR_TYPER (0x0008) +#define GICR_STATUSR (0x0010) +#define GICR_WAKER (0x0014) +#define GICR_SETLPIR (0x0040) +#define GICR_CLRLPIR (0x0048) +#define GICR_PROPBASER (0x0070) +#define GICR_PENDBASER (0x0078) +#define GICR_INVLPIR (0x00A0) +#define GICR_INVALLR (0x00B0) +#define GICR_SYNCR (0x00C0) +#define GICR_PIDR2 GICD_PIDR2 + +/* GICR for SGI's & PPI's */ + +#define GICR_IGROUPR0 (0x0080) +#define GICR_ISENABLER0 (0x0100) +#define GICR_ICENABLER0 (0x0180) +#define GICR_ISPENDR0 (0x0200) +#define GICR_ICPENDR0 (0x0280) +#define GICR_ISACTIVER0 (0x0300) +#define GICR_ICACTIVER0 (0x0380) +#define GICR_IPRIORITYR0 (0x0400) +#define GICR_IPRIORITYR7 (0x041C) +#define GICR_ICFGR0 (0x0C00) +#define GICR_ICFGR1 (0x0C04) +#define GICR_IGRPMODR0 (0x0D00) +#define GICR_NSACR (0x0E00) + +#define GICR_CTLR_ENABLE_LPIS (1U << 0) + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_PROC_NUM_SHIFT 8 +#define GICR_TYPER_PROC_NUM_MASK (0xffff << GICR_TYPER_PROC_NUM_SHIFT) + +/* For specifying the inner cacheability type only */ +#define GIC_BASER_CACHE_nCnB 0ULL +/* For specifying the outer cacheability type only */ +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL + +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL + +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT 56 +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + (7UL << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT) +#define GICR_PROPBASER_SHAREABILITY_SHIFT 10 +#define GICR_PROPBASER_SHAREABILITY_MASK \ + (3UL << GICR_PROPBASER_SHAREABILITY_SHIFT) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT 7 +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + (7UL << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT) +#define GICR_PROPBASER_RES0_MASK \ + (GENMASK(63, 59) | GENMASK(55, 52) | GENMASK(6, 5)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT 10 +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT 7 +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT 56 +#define GICR_PENDBASER_SHAREABILITY_MASK \ + (3UL << GICR_PENDBASER_SHAREABILITY_SHIFT) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + (7UL << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + (7UL << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT) +#define GICR_PENDBASER_PTZ BIT(62, UL) +#define GICR_PENDBASER_RES0_MASK \ + (BIT(63, UL) | GENMASK(61, 59) | GENMASK(55, 52) | \ + GENMASK(15, 12) | GENMASK(6, 0)) + +#define DEFAULT_PMR_VALUE 0xff + +#define LPI_PROP_PRIO_MASK 0xfc +#define LPI_PROP_RES1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +#define ICH_VMCR_EOI (1 << 9) +#define ICH_VMCR_VENG1 (1 << 1) +#define ICH_VMCR_PRIORITY_MASK 0xff +#define ICH_VMCR_PRIORITY_SHIFT 24 + +#define ICH_LR_VIRTUAL_MASK 0xffff +#define ICH_LR_VIRTUAL_SHIFT 0 +#define ICH_LR_CPUID_MASK 0x7 +#define ICH_LR_CPUID_SHIFT 10 +#define ICH_LR_PHYSICAL_MASK 0x3ff +#define ICH_LR_PHYSICAL_SHIFT 32 +#define ICH_LR_STATE_MASK 0x3 +#define ICH_LR_STATE_SHIFT 62 +#define ICH_LR_STATE_PENDING (1ULL << 62) +#define ICH_LR_STATE_ACTIVE (1ULL << 63) +#define ICH_LR_PRIORITY_MASK 0xff +#define 
ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_HW_MASK 0x1 +#define ICH_LR_HW_SHIFT 61 +#define ICH_LR_GRP_MASK 0x1 +#define ICH_LR_GRP_SHIFT 60 +#define ICH_LR_MAINTENANCE_IRQ (1UL<<41) +#define ICH_LR_GRP1 (1UL<<60) +#define ICH_LR_HW (1UL<<61) + +#define ICH_VTR_NRLRGS 0x3f +#define ICH_VTR_PRIBITS_MASK 0x7 +#define ICH_VTR_PRIBITS_SHIFT 29 + +#define ICH_SGI_IRQMODE_SHIFT 40 +#define ICH_SGI_IRQMODE_MASK 0x1 +#define ICH_SGI_TARGET_OTHERS 1UL +#define ICH_SGI_TARGET_LIST 0 +#define ICH_SGI_IRQ_SHIFT 24 +#define ICH_SGI_IRQ_MASK 0xf +#define ICH_SGI_TARGETLIST_MASK 0xffff +#define ICH_SGI_AFFx_MASK 0xff +#define ICH_SGI_AFFINITY_LEVEL(x) (16 * (x)) + +struct rdist_region { + paddr_t base; + paddr_t size; + void __iomem *map_base; + bool single_rdist; +}; + +#endif /* __ASM_ARM_GIC_V3_DEFS_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/gic_v3_its.h b/xen/arch/arm/include/asm/gic_v3_its.h new file mode 100644 index 0000000000..94e5cb99c5 --- /dev/null +++ b/xen/arch/arm/include/asm/gic_v3_its.h @@ -0,0 +1,283 @@ +/* + * ARM GICv3 ITS support + * + * Andre Przywara + * Copyright (c) 2016,2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . 
+ */ + +#ifndef __ASM_ARM_ITS_H__ +#define __ASM_ARM_ITS_H__ + +#define GITS_CTLR 0x000 +#define GITS_IIDR 0x004 +#define GITS_TYPER 0x008 +#define GITS_CBASER 0x080 +#define GITS_CWRITER 0x088 +#define GITS_CREADR 0x090 +#define GITS_BASER_NR_REGS 8 +#define GITS_BASER0 0x100 +#define GITS_BASER1 0x108 +#define GITS_BASER2 0x110 +#define GITS_BASER3 0x118 +#define GITS_BASER4 0x120 +#define GITS_BASER5 0x128 +#define GITS_BASER6 0x130 +#define GITS_BASER7 0x138 +#define GITS_PIDR2 GICR_PIDR2 + +/* Register bits */ +#define GITS_VALID_BIT BIT(63, UL) + +#define GITS_CTLR_QUIESCENT BIT(31, UL) +#define GITS_CTLR_ENABLE BIT(0, UL) + +#define GITS_TYPER_PTA BIT(19, UL) +#define GITS_TYPER_DEVIDS_SHIFT 13 +#define GITS_TYPER_DEVIDS_MASK (0x1fUL << GITS_TYPER_DEVIDS_SHIFT) +#define GITS_TYPER_DEVICE_ID_BITS(r) (((r & GITS_TYPER_DEVIDS_MASK) >> \ + GITS_TYPER_DEVIDS_SHIFT) + 1) + +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_IDBITS_MASK (0x1fUL << GITS_TYPER_IDBITS_SHIFT) +#define GITS_TYPER_EVENT_ID_BITS(r) (((r & GITS_TYPER_IDBITS_MASK) >> \ + GITS_TYPER_IDBITS_SHIFT) + 1) + +#define GITS_TYPER_ITT_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_SIZE_MASK (0xfUL << GITS_TYPER_ITT_SIZE_SHIFT) +#define GITS_TYPER_ITT_SIZE(r) ((((r) & GITS_TYPER_ITT_SIZE_MASK) >> \ + GITS_TYPER_ITT_SIZE_SHIFT) + 1) +#define GITS_TYPER_PHYSICAL (1U << 0) + +#define GITS_BASER_INDIRECT BIT(62, UL) +#define GITS_BASER_INNER_CACHEABILITY_SHIFT 59 +#define GITS_BASER_TYPE_SHIFT 56 +#define GITS_BASER_TYPE_MASK (7ULL << GITS_BASER_TYPE_SHIFT) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT 53 +#define GITS_BASER_TYPE_NONE 0UL +#define GITS_BASER_TYPE_DEVICE 1UL +#define GITS_BASER_TYPE_VCPU 2UL +#define GITS_BASER_TYPE_CPU 3UL +#define GITS_BASER_TYPE_COLLECTION 4UL +#define GITS_BASER_TYPE_RESERVED5 5UL +#define GITS_BASER_TYPE_RESERVED6 6UL +#define GITS_BASER_TYPE_RESERVED7 7UL +#define GITS_BASER_ENTRY_SIZE_SHIFT 48 +#define GITS_BASER_ENTRY_SIZE(reg) \ + (((reg >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_SHAREABILITY_SHIFT 10 +#define GITS_BASER_PAGE_SIZE_SHIFT 8 +#define GITS_BASER_SIZE_MASK 0xff +#define GITS_BASER_SHAREABILITY_MASK (0x3ULL << GITS_BASER_SHAREABILITY_SHIFT) +#define GITS_BASER_OUTER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) +#define GITS_BASER_INNER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_INNER_CACHEABILITY_SHIFT) + +#define GITS_CBASER_SIZE_MASK 0xff + +/* ITS command definitions */ +#define ITS_CMD_SIZE 32 + +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_INV 0x0c +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_DISCARD 0x0f + +#define ITS_DOORBELL_OFFSET 0x10040 +#define GICV3_ITS_SIZE SZ_128K + +#include +#include + +#define HOST_ITS_FLUSH_CMD_QUEUE (1U << 0) +#define HOST_ITS_USES_PTA (1U << 1) + +/* We allocate LPIs on the hosts in chunks of 32 to reduce handling overhead. 
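+ * gicv3_allocate_host_lpi_block() and gicv3_free_host_lpi_block()
+ * below accordingly hand out and reclaim whole blocks only.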
*/ +#define LPI_BLOCK 32U + +/* data structure for each hardware ITS */ +struct host_its { + struct list_head entry; + const struct dt_device_node *dt_node; + paddr_t addr; + paddr_t size; + void __iomem *its_base; + unsigned int devid_bits; + unsigned int evid_bits; + unsigned int itte_size; + spinlock_t cmd_lock; + void *cmd_buf; + unsigned int flags; +}; + + +#ifdef CONFIG_HAS_ITS + +extern struct list_head host_its_list; + +#ifdef CONFIG_ACPI +unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, + void *base_ptr); +#endif + +/* Deny iomem access for its */ +int gicv3_its_deny_access(const struct domain *d); + +bool gicv3_its_host_has_its(void); + +unsigned int vgic_v3_its_count(const struct domain *d); + +void gicv3_do_LPI(unsigned int lpi); + +int gicv3_lpi_init_rdist(void __iomem * rdist_base); + +/* Initialize the host structures for LPIs and the host ITSes. */ +int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits); +int gicv3_its_init(void); + +/* Store the physical address and ID for each redistributor as read from DT. */ +void gicv3_set_redist_address(paddr_t address, unsigned int redist_id); +uint64_t gicv3_get_redist_address(unsigned int cpu, bool use_pta); + +/* Map a collection for this host CPU to each host ITS. */ +int gicv3_its_setup_collection(unsigned int cpu); + +/* Initialize and destroy the per-domain parts of the virtual ITS support. */ +int vgic_v3_its_init_domain(struct domain *d); +void vgic_v3_its_free_domain(struct domain *d); + +/* Create the appropriate DT nodes for a hardware domain. */ +int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, + const struct dt_device_node *gic, + void *fdt); + +/* + * Map a device on the host by allocating an ITT on the host (ITS). + * "nr_event" specifies how many events (interrupts) this device will need. + * Setting "valid" to false deallocates the device. + */ +int gicv3_its_map_guest_device(struct domain *d, + paddr_t host_doorbell, uint32_t host_devid, + paddr_t guest_doorbell, uint32_t guest_devid, + uint64_t nr_events, bool valid); + +int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi); +void gicv3_free_host_lpi_block(uint32_t first_lpi); + +void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq); + +struct pending_irq *gicv3_its_get_event_pending_irq(struct domain *d, + paddr_t vdoorbell_address, + uint32_t vdevid, + uint32_t eventid); +int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address, + uint32_t vdevid, uint32_t eventid); +struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell, + uint32_t devid, uint32_t eventid, + uint32_t virt_lpi); +void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id, + uint32_t virt_lpi); + +#else + +#ifdef CONFIG_ACPI +static inline unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, + void *base_ptr) +{ + return 0; +} +#endif + +static inline int gicv3_its_deny_access(const struct domain *d) +{ + return 0; +} + +static inline bool gicv3_its_host_has_its(void) +{ + return false; +} + +static inline unsigned int vgic_v3_its_count(const struct domain *d) +{ + return 0; +} + +static inline void gicv3_do_LPI(unsigned int lpi) +{ + /* We don't enable LPIs without an ITS. 
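+     * Receiving one here therefore indicates a bug, hence the BUG().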
*/ + BUG(); +} + +static inline int gicv3_lpi_init_rdist(void __iomem * rdist_base) +{ + return -ENODEV; +} + +static inline int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) +{ + return 0; +} + +static inline int gicv3_its_init(void) +{ + return 0; +} + +static inline void gicv3_set_redist_address(paddr_t address, + unsigned int redist_id) +{ +} + +static inline int gicv3_its_setup_collection(unsigned int cpu) +{ + /* We should never get here without an ITS. */ + BUG(); +} + +static inline int vgic_v3_its_init_domain(struct domain *d) +{ + return 0; +} + +static inline void vgic_v3_its_free_domain(struct domain *d) +{ +} + +static inline int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, + const struct dt_device_node *gic, + void *fdt) +{ + return 0; +} + +#endif /* CONFIG_HAS_ITS */ + +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/grant_table.h b/xen/arch/arm/include/asm/grant_table.h new file mode 100644 index 0000000000..d31a4d6805 --- /dev/null +++ b/xen/arch/arm/include/asm/grant_table.h @@ -0,0 +1,108 @@ +#ifndef __ASM_GRANT_TABLE_H__ +#define __ASM_GRANT_TABLE_H__ + +#include +#include +#include +#include + +#include + +#define INITIAL_NR_GRANT_FRAMES 1U +#define GNTTAB_MAX_VERSION 1 + +struct grant_table_arch { + gfn_t *shared_gfn; + gfn_t *status_gfn; +}; + +static inline void gnttab_clear_flags(struct domain *d, + unsigned int mask, uint16_t *addr) +{ + guest_clear_mask16(d, mask, addr); +} + +static inline void gnttab_mark_dirty(struct domain *d, mfn_t mfn) +{ +#ifndef NDEBUG + printk_once(XENLOG_G_WARNING "gnttab_mark_dirty not implemented yet\n"); +#endif +} + +int create_grant_host_mapping(unsigned long gpaddr, mfn_t mfn, + unsigned int flags, unsigned int cache_flags); +#define gnttab_host_mapping_get_page_type(ro, ld, rd) (0) +int replace_grant_host_mapping(unsigned long gpaddr, mfn_t mfn, + unsigned long new_gpaddr, unsigned int flags); +#define gnttab_release_host_mappings(domain) 1 + +/* + * The region used by Xen on the memory will never be mapped in DOM0 + * memory layout. Therefore it can be used for the grant table. + * + * Only use the text section as it's always present and will contain + * enough space for a large grant table + */ +#define gnttab_dom0_frames() \ + min_t(unsigned int, opt_max_grant_frames, PFN_DOWN(_etext - _stext)) + +#define gnttab_init_arch(gt) \ +({ \ + unsigned int ngf_ = (gt)->max_grant_frames; \ + unsigned int nsf_ = grant_to_status_frames(ngf_); \ + \ + (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \ + (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \ + if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn ) \ + { \ + while ( ngf_-- ) \ + (gt)->arch.shared_gfn[ngf_] = INVALID_GFN; \ + while ( nsf_-- ) \ + (gt)->arch.status_gfn[nsf_] = INVALID_GFN; \ + } \ + else \ + gnttab_destroy_arch(gt); \ + (gt)->arch.shared_gfn ? 0 : -ENOMEM; \ +}) + +#define gnttab_destroy_arch(gt) \ + do { \ + XFREE((gt)->arch.shared_gfn); \ + XFREE((gt)->arch.status_gfn); \ + } while ( 0 ) + +#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \ + ({ \ + int rc_ = 0; \ + gfn_t ogfn = gnttab_get_frame_gfn(gt, st, idx); \ + if ( gfn_eq(ogfn, INVALID_GFN) || gfn_eq(ogfn, gfn) || \ + (rc_ = guest_physmap_remove_page((gt)->domain, ogfn, mfn, \ + 0)) == 0 ) \ + ((st) ? (gt)->arch.status_gfn \ + : (gt)->arch.shared_gfn)[idx] = (gfn); \ + rc_; \ + }) + +#define gnttab_get_frame_gfn(gt, st, idx) ({ \ + (st) ? 
gnttab_status_gfn(NULL, gt, idx)              \
+           : gnttab_shared_gfn(NULL, gt, idx);          \
+})
+
+#define gnttab_shared_gfn(d, t, i)                                       \
+    (((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i])
+
+#define gnttab_status_gfn(d, t, i)                                       \
+    (((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
+
+#define gnttab_need_iommu_mapping(d)                    \
+    (is_domain_direct_mapped(d) && is_iommu_enabled(d))
+
+#endif /* __ASM_GRANT_TABLE_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/guest_access.h b/xen/arch/arm/include/asm/guest_access.h
new file mode 100644
index 0000000000..53766386d3
--- /dev/null
+++ b/xen/arch/arm/include/asm/guest_access.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_ARM_GUEST_ACCESS_H__
+#define __ASM_ARM_GUEST_ACCESS_H__
+
+#include
+#include
+
+unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len);
+unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
+                                             unsigned len);
+unsigned long raw_copy_from_guest(void *to, const void *from, unsigned len);
+unsigned long raw_clear_guest(void *to, unsigned len);
+
+/* Copy data to guest physical address, then clean the region. */
+unsigned long copy_to_guest_phys_flush_dcache(struct domain *d,
+                                              paddr_t phys,
+                                              void *buf,
+                                              unsigned int len);
+
+int access_guest_memory_by_ipa(struct domain *d, paddr_t ipa, void *buf,
+                               uint32_t size, bool is_write);
+
+#define __raw_copy_to_guest raw_copy_to_guest
+#define __raw_copy_from_guest raw_copy_from_guest
+#define __raw_clear_guest raw_clear_guest
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+/* All ARM guests are paging mode external and hence safe */
+#define guest_handle_okay(hnd, nr) (1)
+#define guest_handle_subrange_okay(hnd, first, last) (1)
+
+#endif /* __ASM_ARM_GUEST_ACCESS_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/guest_atomics.h b/xen/arch/arm/include/asm/guest_atomics.h
new file mode 100644
index 0000000000..9e2e96d4ff
--- /dev/null
+++ b/xen/arch/arm/include/asm/guest_atomics.h
@@ -0,0 +1,148 @@
+#ifndef _ARM_GUEST_ATOMICS_H
+#define _ARM_GUEST_ATOMICS_H
+
+#include
+#include
+
+/*
+ * The guest atomics helpers share the same logic. We first try to use
+ * the *_timeout version of the operation. If it didn't time out, then
+ * we successfully updated the memory. Nothing else to do.
+ *
+ * If it did time out, then it means we didn't manage to update the
+ * memory. This is possibly because the guest is misbehaving (e.g. a
+ * tight store loop) but can also happen for other reasons (e.g. nested
+ * Xen). In that case pause the domain and retry the operation, this
+ * time without a timeout.
+ *
+ * Note, these helpers rely on other parts of the code to prevent
+ * sharing a page between Xen and multiple domains.
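+ *
+ * Pausing the domain guarantees that none of its vCPUs can write to
+ * the page concurrently, which is what makes the plain (non-timeout)
+ * retry below safe.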
+ */ + +DECLARE_PER_CPU(unsigned int, guest_safe_atomic_max); + +#define guest_bitop(name) \ +static inline void guest_##name(struct domain *d, int nr, volatile void *p) \ +{ \ + perfc_incr(atomics_guest); \ + \ + if ( name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) ) \ + return; \ + \ + perfc_incr(atomics_guest_paused); \ + \ + domain_pause_nosync(d); \ + name(nr, p); \ + domain_unpause(d); \ +} + +#define guest_testop(name) \ +static inline int guest_##name(struct domain *d, int nr, volatile void *p) \ +{ \ + bool succeed; \ + int oldbit; \ + \ + perfc_incr(atomics_guest); \ + \ + succeed = name##_timeout(nr, p, &oldbit, \ + this_cpu(guest_safe_atomic_max)); \ + if ( succeed ) \ + return oldbit; \ + \ + perfc_incr(atomics_guest_paused); \ + \ + domain_pause_nosync(d); \ + oldbit = name(nr, p); \ + domain_unpause(d); \ + \ + return oldbit; \ +} + +guest_bitop(set_bit) +guest_bitop(clear_bit) +guest_bitop(change_bit) + +#undef guest_bitop + +/* test_bit does not use load-store atomic operations */ +#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p)) + +guest_testop(test_and_set_bit) +guest_testop(test_and_clear_bit) +guest_testop(test_and_change_bit) + +#undef guest_testop + +static inline void guest_clear_mask16(struct domain *d, uint16_t mask, + volatile uint16_t *p) +{ + perfc_incr(atomics_guest); + + if ( clear_mask16_timeout(mask, p, this_cpu(guest_safe_atomic_max)) ) + return; + + domain_pause_nosync(d); + clear_mask16(mask, p); + domain_unpause(d); +} + +static inline unsigned long __guest_cmpxchg(struct domain *d, + volatile void *ptr, + unsigned long old, + unsigned long new, + unsigned int size) +{ + unsigned long oldval = old; + + perfc_incr(atomics_guest); + + if ( __cmpxchg_timeout(ptr, &oldval, new, size, + this_cpu(guest_safe_atomic_max)) ) + return oldval; + + perfc_incr(atomics_guest_paused); + + domain_pause_nosync(d); + oldval = __cmpxchg(ptr, old, new, size); + domain_unpause(d); + + return oldval; +} + +#define guest_cmpxchg(d, ptr, o, n) \ + ((__typeof__(*(ptr)))__guest_cmpxchg(d, ptr, \ + (unsigned long)(o),\ + (unsigned long)(n),\ + sizeof (*(ptr)))) + +static inline uint64_t guest_cmpxchg64(struct domain *d, + volatile uint64_t *ptr, + uint64_t old, + uint64_t new) +{ + uint64_t oldval = old; + + perfc_incr(atomics_guest); + + if ( __cmpxchg64_timeout(ptr, &oldval, new, + this_cpu(guest_safe_atomic_max)) ) + return oldval; + + perfc_incr(atomics_guest_paused); + + domain_pause_nosync(d); + oldval = cmpxchg64(ptr, old, new); + domain_unpause(d); + + return oldval; +} + +#endif /* _ARM_GUEST_ATOMICS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/guest_walk.h b/xen/arch/arm/include/asm/guest_walk.h new file mode 100644 index 0000000000..8768ac9894 --- /dev/null +++ b/xen/arch/arm/include/asm/guest_walk.h @@ -0,0 +1,19 @@ +#ifndef _XEN_GUEST_WALK_H +#define _XEN_GUEST_WALK_H + +/* Walk the guest's page tables in software. 
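+ * On success the translated address is returned in *ipa and the
+ * access permissions of the mapping in *perms.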
*/ +bool guest_walk_tables(const struct vcpu *v, + vaddr_t gva, + paddr_t *ipa, + unsigned int *perms); + +#endif /* _XEN_GUEST_WALK_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/hardirq.h b/xen/arch/arm/include/asm/hardirq.h new file mode 100644 index 0000000000..67b6a673db --- /dev/null +++ b/xen/arch/arm/include/asm/hardirq.h @@ -0,0 +1,27 @@ +#ifndef __ASM_HARDIRQ_H +#define __ASM_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned long __softirq_pending; + unsigned int __local_irq_count; +} __cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +#define in_irq() (local_irq_count(smp_processor_id()) != 0) + +#define irq_enter() (local_irq_count(smp_processor_id())++) +#define irq_exit() (local_irq_count(smp_processor_id())--) + +#endif /* __ASM_HARDIRQ_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/hsr.h b/xen/arch/arm/include/asm/hsr.h new file mode 100644 index 0000000000..9b91b28c48 --- /dev/null +++ b/xen/arch/arm/include/asm/hsr.h @@ -0,0 +1,217 @@ +#ifndef __ASM_ARM_HSR_H +#define __ASM_ARM_HSR_H + +#include + +#if defined(CONFIG_ARM_64) +# include +#endif + +/* HSR data abort size definition */ +enum dabt_size { + DABT_BYTE = 0, + DABT_HALF_WORD = 1, + DABT_WORD = 2, + DABT_DOUBLE_WORD = 3, +}; + +union hsr { + register_t bits; + struct { + unsigned long iss:25; /* Instruction Specific Syndrome */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + }; + + /* Common to all conditional exception classes (0x0N, except 0x00). */ + struct hsr_cond { + unsigned long iss:20; /* Instruction Specific Syndrome */ + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } cond; + + struct hsr_wfi_wfe { + unsigned long ti:1; /* Trapped instruction */ + unsigned long sbzp:19; + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } wfi_wfe; + + /* reg, reg0, reg1 are 4 bits on AArch32, the fifth bit is sbzp. 
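[ As a worked example of how the hsr union is meant to be consumed, a sketch of syndrome decoding in a trap handler. HSR_EC_DATA_ABORT_LOWER_EL is assumed to come from processor.h (this header does not define the EC constants), and the syndrome read is simplified: ]

    #include <asm/hsr.h>
    #include <asm/processor.h>  /* assumed home of the HSR_EC_* constants */

    /*
     * Sketch: decode a syndrome into its exception class and, for a
     * data abort, pull out the access size and direction. 'bits' is
     * assumed to have been read from HSR (arm32) or ESR_EL2 (arm64)
     * by the exception entry code.
     */
    static void decode_trap(register_t bits)
    {
        const union hsr hsr = { .bits = bits };

        if ( hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL )
        {
            unsigned int bytes = 1U << hsr.dabt.size; /* enum dabt_size */
            bool is_write = hsr.dabt.write;

            /* ... dispatch to the MMIO emulation declared in mmio.h ... */
            (void)bytes; (void)is_write;
        }
    }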
*/ + struct hsr_cp32 { + unsigned long read:1; /* Direction */ + unsigned long crm:4; /* CRm */ + unsigned long reg:5; /* Rt */ + unsigned long crn:4; /* CRn */ + unsigned long op1:3; /* Op1 */ + unsigned long op2:3; /* Op2 */ + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } cp32; /* HSR_EC_CP15_32, CP14_32, CP10 */ + + struct hsr_cp64 { + unsigned long read:1; /* Direction */ + unsigned long crm:4; /* CRm */ + unsigned long reg1:5; /* Rt1 */ + unsigned long reg2:5; /* Rt2 */ + unsigned long sbzp2:1; + unsigned long op1:4; /* Op1 */ + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } cp64; /* HSR_EC_CP15_64, HSR_EC_CP14_64 */ + + struct hsr_cp { + unsigned long coproc:4; /* Number of coproc accessed */ + unsigned long sbz0p:1; + unsigned long tas:1; /* Trapped Advanced SIMD */ + unsigned long res0:14; + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } cp; /* HSR_EC_CP */ + + /* + * This encoding is valid only for ARMv8 (ARM DDI 0487B.a, pages D7-2271 and + * G6-4957). On ARMv7, encoding ISS for EC=0x13 is defined as UNK/SBZP + * (ARM DDI 0406C.c page B3-1431). UNK/SBZP means that hardware implements + * this field as Read-As-Zero. ARMv8 is backwards compatible with ARMv7: + * reading CCKNOWNPASS on ARMv7 will return 0, which means that condition + * check was passed or instruction was unconditional. + */ + struct hsr_smc32 { + unsigned long res0:19; /* Reserved */ + unsigned long ccknownpass:1; /* Instruction passed conditional check */ + unsigned long cc:4; /* Condition Code */ + unsigned long ccvalid:1;/* CC Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } smc32; /* HSR_EC_SMC32 */ + +#ifdef CONFIG_ARM_64 + struct hsr_sysreg { + unsigned long read:1; /* Direction */ + unsigned long crm:4; /* CRm */ + unsigned long reg:5; /* Rt */ + unsigned long crn:4; /* CRn */ + unsigned long op1:3; /* Op1 */ + unsigned long op2:3; /* Op2 */ + unsigned long op0:2; /* Op0 */ + unsigned long res0:3; + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; + } sysreg; /* HSR_EC_SYSREG */ +#endif + + struct hsr_iabt { + unsigned long ifsc:6; /* Instruction fault status code */ + unsigned long res0:1; /* RES0 */ + unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ + unsigned long res1:1; /* RES0 */ + unsigned long eat:1; /* External abort type */ + unsigned long fnv:1; /* FAR not Valid */ + unsigned long res2:14; + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } iabt; /* HSR_EC_INSTR_ABORT_* */ + + struct hsr_dabt { + unsigned long dfsc:6; /* Data Fault Status Code */ + unsigned long write:1; /* Write / not Read */ + unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ + unsigned long cache:1; /* Cache Maintenance */ + unsigned long eat:1; /* External Abort Type */ + unsigned long fnv:1; /* FAR not Valid */ +#ifdef CONFIG_ARM_32 + unsigned long sbzp0:5; +#else + unsigned long sbzp0:3; + unsigned long ar:1; /* Acquire Release */ + unsigned long sf:1; /* Sixty Four bit register */ +#endif + unsigned long reg:5; /* Register */ + unsigned long sign:1; /* Sign extend */ + unsigned 
long size:2; /* Access Size */ + unsigned long valid:1; /* Syndrome Valid */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } dabt; /* HSR_EC_DATA_ABORT_* */ + + /* Contain the common bits between DABT and IABT */ + struct hsr_xabt { + unsigned long fsc:6; /* Fault status code */ + unsigned long pad1:1; /* Not common */ + unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ + unsigned long pad2:1; /* Not common */ + unsigned long eat:1; /* External abort type */ + unsigned long fnv:1; /* FAR not Valid */ + unsigned long pad3:14; /* Not common */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } xabt; + +#ifdef CONFIG_ARM_64 + struct hsr_brk { + unsigned long comment:16; /* Comment */ + unsigned long res0:9; + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ + } brk; +#endif +}; + +/* HSR.EC == HSR_CP{15,14,10}_32 */ +#define HSR_CP32_OP2_MASK (0x000e0000) +#define HSR_CP32_OP2_SHIFT (17) +#define HSR_CP32_OP1_MASK (0x0001c000) +#define HSR_CP32_OP1_SHIFT (14) +#define HSR_CP32_CRN_MASK (0x00003c00) +#define HSR_CP32_CRN_SHIFT (10) +#define HSR_CP32_CRM_MASK (0x0000001e) +#define HSR_CP32_CRM_SHIFT (1) +#define HSR_CP32_REGS_MASK (HSR_CP32_OP1_MASK|HSR_CP32_OP2_MASK|\ + HSR_CP32_CRN_MASK|HSR_CP32_CRM_MASK) + +/* HSR.EC == HSR_CP{15,14}_64 */ +#define HSR_CP64_OP1_MASK (0x000f0000) +#define HSR_CP64_OP1_SHIFT (16) +#define HSR_CP64_CRM_MASK (0x0000001e) +#define HSR_CP64_CRM_SHIFT (1) +#define HSR_CP64_REGS_MASK (HSR_CP64_OP1_MASK|HSR_CP64_CRM_MASK) + +/* HSR.EC == HSR_SYSREG */ +#define HSR_SYSREG_OP0_MASK (0x00300000) +#define HSR_SYSREG_OP0_SHIFT (20) +#define HSR_SYSREG_OP1_MASK (0x0001c000) +#define HSR_SYSREG_OP1_SHIFT (14) +#define HSR_SYSREG_CRN_MASK (0x00003c00) +#define HSR_SYSREG_CRN_SHIFT (10) +#define HSR_SYSREG_CRM_MASK (0x0000001e) +#define HSR_SYSREG_CRM_SHIFT (1) +#define HSR_SYSREG_OP2_MASK (0x000e0000) +#define HSR_SYSREG_OP2_SHIFT (17) +#define HSR_SYSREG_REGS_MASK (HSR_SYSREG_OP0_MASK|HSR_SYSREG_OP1_MASK|\ + HSR_SYSREG_CRN_MASK|HSR_SYSREG_CRM_MASK|\ + HSR_SYSREG_OP2_MASK) + +/* HSR.EC == HSR_{HVC32, HVC64, SMC64, SVC32, SVC64} */ +#define HSR_XXC_IMM_MASK (0xffff) + +#endif /* __ASM_ARM_HSR_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/hypercall.h b/xen/arch/arm/include/asm/hypercall.h new file mode 100644 index 0000000000..a0c5a31a2f --- /dev/null +++ b/xen/arch/arm/include/asm/hypercall.h @@ -0,0 +1,20 @@ +#ifndef __ASM_ARM_HYPERCALL_H__ +#define __ASM_ARM_HYPERCALL_H__ + +#include /* for arch_do_domctl */ +int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); + +long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg); + +long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); + +#endif /* __ASM_ARM_HYPERCALL_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/init.h b/xen/arch/arm/include/asm/init.h new file mode 100644 index 0000000000..5ac8cf8797 --- /dev/null +++ b/xen/arch/arm/include/asm/init.h @@ -0,0 +1,20 @@ +#ifndef _XEN_ASM_INIT_H +#define _XEN_ASM_INIT_H + +struct init_info +{ + /* Pointer to the stack, used by head.S when entering in C */ + unsigned char *stack; + /* Logical CPU 
ID, used by start_secondary */ + unsigned int cpuid; +}; + +#endif /* _XEN_ASM_INIT_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/insn.h b/xen/arch/arm/include/asm/insn.h new file mode 100644 index 0000000000..27271e95f9 --- /dev/null +++ b/xen/arch/arm/include/asm/insn.h @@ -0,0 +1,29 @@ +#ifndef __ARCH_ARM_INSN +#define __ARCH_ARM_INSN + +#ifndef __ASSEMBLY__ + +#include + +#if defined(CONFIG_ARM_64) +# include +#elif defined(CONFIG_ARM_32) +# include +#else +# error "unknown ARM variant" +#endif + +#endif /* __ASSEMBLY__ */ + +/* On ARM32,64 instructions are always 4 bytes long. */ +#define ARCH_PATCH_INSN_SIZE 4 + +#endif /* !__ARCH_ARM_INSN */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 8 + * indent-tabs-mode: t + * End: + */ diff --git a/xen/arch/arm/include/asm/io.h b/xen/arch/arm/include/asm/io.h new file mode 100644 index 0000000000..e426804424 --- /dev/null +++ b/xen/arch/arm/include/asm/io.h @@ -0,0 +1,20 @@ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/iocap.h b/xen/arch/arm/include/asm/iocap.h new file mode 100644 index 0000000000..276fefbc59 --- /dev/null +++ b/xen/arch/arm/include/asm/iocap.h @@ -0,0 +1,16 @@ +#ifndef __X86_IOCAP_H__ +#define __X86_IOCAP_H__ + +#define cache_flush_permitted(d) \ + (!rangeset_is_empty((d)->iomem_caps)) + +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/iommu.h b/xen/arch/arm/include/asm/iommu.h new file mode 100644 index 0000000000..937edc8373 --- /dev/null +++ b/xen/arch/arm/include/asm/iommu.h @@ -0,0 +1,45 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . +*/ +#ifndef __ARCH_ARM_IOMMU_H__ +#define __ARCH_ARM_IOMMU_H__ + +struct arch_iommu +{ + /* Private information for the IOMMU drivers */ + void *priv; +}; + +const struct iommu_ops *iommu_get_ops(void); +void iommu_set_ops(const struct iommu_ops *ops); + +/* + * The mapping helpers below should only be used if P2M Table is shared + * between the CPU and the IOMMU. 
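[ The iommu_get_ops()/iommu_set_ops() pair above is the whole registration interface on Arm. A sketch of a driver publishing its operations table at boot; the driver name is illustrative and the ops contents are elided, assuming xen/iommu.h provides the full struct iommu_ops definition: ]

    #include <xen/iommu.h>      /* full struct iommu_ops definition */
    #include <asm/iommu.h>

    /* Ops contents elided; a real driver fills in map/unmap etc. */
    static const struct iommu_ops demo_iommu_ops = {
    };

    /* Sketch: called once from the driver's init hook at boot. */
    static void __init demo_iommu_register(void)
    {
        iommu_set_ops(&demo_iommu_ops);
    }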
+ */ +int __must_check arm_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, + unsigned int flags, + unsigned int *flush_flags); +int __must_check arm_iommu_unmap_page(struct domain *d, dfn_t dfn, + unsigned int *flush_flags); + +#endif /* __ARCH_ARM_IOMMU_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/iommu_fwspec.h b/xen/arch/arm/include/asm/iommu_fwspec.h new file mode 100644 index 0000000000..5cdb53f8e8 --- /dev/null +++ b/xen/arch/arm/include/asm/iommu_fwspec.h @@ -0,0 +1,68 @@ +/* + * xen/include/asm-arm/iommu_fwspec.h + * + * Contains a common structure to hold the per-device firmware data and + * declaration of functions used to maintain that data + * + * Based on Linux's iommu_fwspec support you can find at: + * include/linux/iommu.h + * + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. + * + * Copyright (C) 2019 EPAM Systems Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + */ + +#ifndef __ARCH_ARM_IOMMU_FWSPEC_H__ +#define __ARCH_ARM_IOMMU_FWSPEC_H__ + +/* per-device IOMMU instance data */ +struct iommu_fwspec { + /* this device's IOMMU */ + struct device *iommu_dev; + /* IOMMU driver private data for this device */ + void *iommu_priv; + /* number of associated device IDs */ + unsigned int num_ids; + /* IDs which this device may present to the IOMMU */ + uint32_t ids[]; +}; + +int iommu_fwspec_init(struct device *dev, struct device *iommu_dev); +void iommu_fwspec_free(struct device *dev); +int iommu_fwspec_add_ids(struct device *dev, const uint32_t *ids, + unsigned int num_ids); + +static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) +{ + return dev->iommu_fwspec; +} + +static inline void dev_iommu_fwspec_set(struct device *dev, + struct iommu_fwspec *fwspec) +{ + dev->iommu_fwspec = fwspec; +} + +#endif /* __ARCH_ARM_IOMMU_FWSPEC_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/ioreq.h b/xen/arch/arm/include/asm/ioreq.h new file mode 100644 index 0000000000..50185978d5 --- /dev/null +++ b/xen/arch/arm/include/asm/ioreq.h @@ -0,0 +1,70 @@ +/* + * ioreq.h: Hardware virtual machine assist interface definitions. + * + * Copyright (c) 2016 Citrix Systems Inc. + * Copyright (c) 2019 Arm ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
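[ To make the fwspec flow concrete, a sketch of how an IOMMU driver's add-device path might record the master IDs a device presents. 'dev', 'iommu_dev' and 'sid' are assumed to come from the caller, e.g. parsed from an "iommus" device-tree property; the function name is illustrative: ]

    #include <asm/iommu_fwspec.h>

    /*
     * Sketch: attach a firmware spec to a master device and record one
     * stream ID. On any failure the partially built spec is released.
     */
    static int probe_master(struct device *dev, struct device *iommu_dev,
                            uint32_t sid)
    {
        int rc = iommu_fwspec_init(dev, iommu_dev);

        if ( !rc )
            rc = iommu_fwspec_add_ids(dev, &sid, 1);
        if ( rc )
            iommu_fwspec_free(dev);

        return rc;
    }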
+ */ + +#ifndef __ASM_ARM_IOREQ_H__ +#define __ASM_ARM_IOREQ_H__ + +#ifdef CONFIG_IOREQ_SERVER +enum io_state handle_ioserv(struct cpu_user_regs *regs, struct vcpu *v); +enum io_state try_fwd_ioserv(struct cpu_user_regs *regs, + struct vcpu *v, mmio_info_t *info); +#else +static inline enum io_state handle_ioserv(struct cpu_user_regs *regs, + struct vcpu *v) +{ + return IO_UNHANDLED; +} + +static inline enum io_state try_fwd_ioserv(struct cpu_user_regs *regs, + struct vcpu *v, mmio_info_t *info) +{ + return IO_UNHANDLED; +} +#endif + +static inline bool handle_pio(uint16_t port, unsigned int size, int dir) +{ + /* + * TODO: For Arm64, the main user will be PCI. So this should be + * implemented when we add support for vPCI. + */ + ASSERT_UNREACHABLE(); + return true; +} + +static inline void msix_write_completion(struct vcpu *v) +{ +} + +/* This correlation must not be altered */ +#define IOREQ_STATUS_HANDLED IO_HANDLED +#define IOREQ_STATUS_UNHANDLED IO_UNHANDLED +#define IOREQ_STATUS_RETRY IO_RETRY + +#endif /* __ASM_ARM_IOREQ_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/irq.h b/xen/arch/arm/include/asm/irq.h new file mode 100644 index 0000000000..e45d574598 --- /dev/null +++ b/xen/arch/arm/include/asm/irq.h @@ -0,0 +1,109 @@ +#ifndef _ASM_HW_IRQ_H +#define _ASM_HW_IRQ_H + +#include +#include + +/* + * These defines correspond to the Xen internal representation of the + * IRQ types. We choose to make them the same as the existing device + * tree definitions for convenience. + */ +#define IRQ_TYPE_NONE DT_IRQ_TYPE_NONE +#define IRQ_TYPE_EDGE_RISING DT_IRQ_TYPE_EDGE_RISING +#define IRQ_TYPE_EDGE_FALLING DT_IRQ_TYPE_EDGE_FALLING +#define IRQ_TYPE_EDGE_BOTH DT_IRQ_TYPE_EDGE_BOTH +#define IRQ_TYPE_LEVEL_HIGH DT_IRQ_TYPE_LEVEL_HIGH +#define IRQ_TYPE_LEVEL_LOW DT_IRQ_TYPE_LEVEL_LOW +#define IRQ_TYPE_LEVEL_MASK DT_IRQ_TYPE_LEVEL_MASK +#define IRQ_TYPE_SENSE_MASK DT_IRQ_TYPE_SENSE_MASK +#define IRQ_TYPE_INVALID DT_IRQ_TYPE_INVALID + +#define NR_VECTORS 256 /* XXX */ + +typedef struct { + DECLARE_BITMAP(_bits,NR_VECTORS); +} vmask_t; + +struct arch_pirq +{ +}; + +struct arch_irq_desc { + unsigned int type; +}; + +#define NR_LOCAL_IRQS 32 + +/* + * This only covers the interrupts that Xen cares about, so SGIs, PPIs and + * SPIs. LPIs are too numerous, also only propagated to guests, so they are + * not included in this number. + */ +#define NR_IRQS 1024 + +#define LPI_OFFSET 8192 + +/* LPIs are always numbered starting at 8192, so 0 is a good invalid case. */ +#define INVALID_LPI 0 + +/* This is a spurious interrupt ID which never makes it into the GIC code. 
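[ Putting the interrupt-number constants above together, a sketch classifying an INTID against the ranges this header defines; the helper name is illustrative and not part of the header: ]

    #include <asm/irq.h>

    /*
     * Sketch: the IRQ number space as laid out above. SGIs and PPIs
     * share the first NR_LOCAL_IRQS (32) numbers and are banked per
     * CPU, SPIs run up to NR_IRQS, and LPIs start at LPI_OFFSET (8192).
     */
    static const char *irq_class(unsigned int irq)
    {
        if ( irq >= LPI_OFFSET )
            return "LPI";
        if ( irq < NR_LOCAL_IRQS )
            return "SGI/PPI";
        if ( irq < NR_IRQS )
            return "SPI";
        return "reserved";
    }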
*/ +#define INVALID_IRQ 1023 + +extern const unsigned int nr_irqs; +#define nr_static_irqs NR_IRQS +#define arch_hwdom_irqs(domid) NR_IRQS + +struct irq_desc; +struct irqaction; + +struct irq_desc *__irq_to_desc(int irq); + +#define irq_to_desc(irq) __irq_to_desc(irq) + +void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq); + +static inline bool is_lpi(unsigned int irq) +{ + return irq >= LPI_OFFSET; +} + +#define domain_pirq_to_irq(d, pirq) (pirq) + +bool is_assignable_irq(unsigned int irq); + +void init_IRQ(void); +void init_secondary_IRQ(void); + +int route_irq_to_guest(struct domain *d, unsigned int virq, + unsigned int irq, const char *devname); +int release_guest_irq(struct domain *d, unsigned int irq); + +void arch_move_irqs(struct vcpu *v); + +#define arch_evtchn_bind_pirq(d, pirq) ((void)((d) + (pirq))) + +/* Set IRQ type for an SPI */ +int irq_set_spi_type(unsigned int spi, unsigned int type); + +int irq_set_type(unsigned int irq, unsigned int type); + +int platform_get_irq(const struct dt_device_node *device, int index); + +void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask); + +/* + * Use this helper in places that need to know whether the IRQ type is + * set by the domain. + */ +bool irq_type_set_by_domain(const struct domain *d); + +#endif /* _ASM_HW_IRQ_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/kernel.h b/xen/arch/arm/include/asm/kernel.h new file mode 100644 index 0000000000..874aa108a7 --- /dev/null +++ b/xen/arch/arm/include/asm/kernel.h @@ -0,0 +1,89 @@ +/* + * Kernel image loading. + * + * Copyright (C) 2011 Citrix Systems, Inc. + */ +#ifndef __ARCH_ARM_KERNEL_H__ +#define __ARCH_ARM_KERNEL_H__ + +#include +#include + +struct kernel_info { +#ifdef CONFIG_ARM_64 + enum domain_type type; +#endif + + struct domain *d; + + void *fdt; /* flat device tree */ + paddr_t unassigned_mem; /* RAM not (yet) assigned to a bank */ + struct meminfo mem; + + /* kernel entry point */ + paddr_t entry; + + /* grant table region */ + paddr_t gnttab_start; + paddr_t gnttab_size; + + /* boot blob load addresses */ + const struct bootmodule *kernel_bootmodule, *initrd_bootmodule, *dtb_bootmodule; + const char* cmdline; + paddr_t dtb_paddr; + paddr_t initrd_paddr; + + /* Enable pl011 emulation */ + bool vpl011; + + /* GIC phandle */ + uint32_t phandle_gic; + + /* loader to use for this kernel */ + void (*load)(struct kernel_info *info); + /* loader specific state */ + union { + struct { + paddr_t kernel_addr; + paddr_t len; +#ifdef CONFIG_ARM_64 + paddr_t text_offset; /* 64-bit Image only */ +#endif + paddr_t start; /* 32-bit zImage only */ + } zimage; + }; +}; + +/* + * Probe the kernel to detemine its type and select a loader. + * + * Sets in info: + * ->type + * ->load hook, and sets loader specific variables ->zimage + */ +int kernel_probe(struct kernel_info *info, const struct dt_device_node *domain); + +/* + * Loads the kernel into guest RAM. 
+ * + * Expects to be set in info when called: + * ->mem + * ->fdt + * + * Sets in info: + * ->entry + * ->dtb_paddr + * ->initrd_paddr + */ +void kernel_load(struct kernel_info *info); + +#endif /* #ifdef __ARCH_ARM_KERNEL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/livepatch.h b/xen/arch/arm/include/asm/livepatch.h new file mode 100644 index 0000000000..026af5e7dc --- /dev/null +++ b/xen/arch/arm/include/asm/livepatch.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +#ifndef __XEN_ARM_LIVEPATCH_H__ +#define __XEN_ARM_LIVEPATCH_H__ + +#include /* For SZ_* macros. */ +#include + +/* + * The va of the hypervisor .text region. We need this as the + * normal va are write protected. + */ +extern void *vmap_of_xen_text; + +/* These ranges are only for unconditional branches. */ +#ifdef CONFIG_ARM_32 +/* ARM32: A4.3 IN ARM DDI 0406C.c - we are using only ARM instructions in Xen.*/ +#define ARCH_LIVEPATCH_RANGE SZ_32M +#else +/* ARM64: C1.3.2 in ARM DDI 0487A.j */ +#define ARCH_LIVEPATCH_RANGE SZ_128M +#endif + +#endif /* __XEN_ARM_LIVEPATCH_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/lpae.h b/xen/arch/arm/include/asm/lpae.h new file mode 100644 index 0000000000..e94de2e7d8 --- /dev/null +++ b/xen/arch/arm/include/asm/lpae.h @@ -0,0 +1,257 @@ +#ifndef __ARM_LPAE_H__ +#define __ARM_LPAE_H__ + +#ifndef __ASSEMBLY__ + +#include + +/* + * WARNING! Unlike the x86 pagetable code, where l1 is the lowest level and + * l4 is the root of the trie, the ARM pagetables follow ARM's documentation: + * the levels are called first, second &c in the order that the MMU walks them + * (i.e. "first" is the root of the trie). + */ + +/****************************************************************************** + * ARMv7-A LPAE pagetables: 3-level trie, mapping 40-bit input to + * 40-bit output addresses. Tables at all levels have 512 64-bit entries + * (i.e. are 4Kb long). + * + * The bit-shuffling that has the permission bits in branch nodes in a + * different place from those in leaf nodes seems to be to allow linear + * pagetable tricks. If we're not doing that then the set of permission + * bits that's not in use in a given node type can be used as + * extra software-defined bits. + */ + +typedef struct __packed { + /* These are used in all kinds of entry. */ + unsigned long valid:1; /* Valid mapping */ + unsigned long table:1; /* == 1 in 4k map entries too */ + + /* + * These ten bits are only used in Block entries and are ignored + * in Table entries. + */ + unsigned long ai:3; /* Attribute Index */ + unsigned long ns:1; /* Not-Secure */ + unsigned long up:1; /* Unpriviledged access */ + unsigned long ro:1; /* Read-Only */ + unsigned long sh:2; /* Shareability */ + unsigned long af:1; /* Access Flag */ + unsigned long ng:1; /* Not-Global */ + + /* The base address must be appropriately aligned for Block entries */ + unsigned long long base:36; /* Base address of block or next table */ + unsigned long sbz:4; /* Must be zero */ + + /* + * These seven bits are only used in Block entries and are ignored + * in Table entries. 
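[ The ARCH_LIVEPATCH_RANGE limits above translate into a simple displacement check. A sketch of the kind of validation a payload loader might perform before rewriting a call site; the function name is illustrative and the exact encodable range of a branch instruction is simplified: ]

    #include <asm/livepatch.h>

    /*
     * Sketch: check that an unconditional branch from 'from' to 'to'
     * stays within the architectural range (+/-32M on arm32, +/-128M
     * on arm64), as required before patching in a jump.
     */
    static bool branch_in_range(const void *from, const void *to)
    {
        long disp = (const char *)to - (const char *)from;

        return disp >= -(long)ARCH_LIVEPATCH_RANGE &&
               disp < (long)ARCH_LIVEPATCH_RANGE;
    }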
+ */ + unsigned long contig:1; /* In a block of 16 contiguous entries */ + unsigned long pxn:1; /* Privileged-XN */ + unsigned long xn:1; /* eXecute-Never */ + unsigned long avail:4; /* Ignored by hardware */ + + /* + * These 5 bits are only used in Table entries and are ignored in + * Block entries. + */ + unsigned long pxnt:1; /* Privileged-XN */ + unsigned long xnt:1; /* eXecute-Never */ + unsigned long apt:2; /* Access Permissions */ + unsigned long nst:1; /* Not-Secure */ +} lpae_pt_t; + +/* + * The p2m tables have almost the same layout, but some of the permission + * and cache-control bits are laid out differently (or missing). + */ +typedef struct __packed { + /* These are used in all kinds of entry. */ + unsigned long valid:1; /* Valid mapping */ + unsigned long table:1; /* == 1 in 4k map entries too */ + + /* + * These ten bits are only used in Block entries and are ignored + * in Table entries. + */ + unsigned long mattr:4; /* Memory Attributes */ + unsigned long read:1; /* Read access */ + unsigned long write:1; /* Write access */ + unsigned long sh:2; /* Shareability */ + unsigned long af:1; /* Access Flag */ + unsigned long sbz4:1; + + /* The base address must be appropriately aligned for Block entries */ + unsigned long long base:36; /* Base address of block or next table */ + unsigned long sbz3:4; + + /* + * These seven bits are only used in Block entries and are ignored + * in Table entries. + */ + unsigned long contig:1; /* In a block of 16 contiguous entries */ + unsigned long sbz2:1; + unsigned long xn:1; /* eXecute-Never */ + unsigned long type:4; /* Ignore by hardware. Used to store p2m types */ + + unsigned long sbz1:5; +} lpae_p2m_t; + +/* Permission mask: xn, write, read */ +#define P2M_PERM_MASK (0x00400000000000C0ULL) +#define P2M_CLEAR_PERM(pte) ((pte).bits & ~P2M_PERM_MASK) + +/* + * Walk is the common bits of p2m and pt entries which are needed to + * simply walk the table (e.g. for debug). + */ +typedef struct __packed { + /* These are used in all kinds of entry. */ + unsigned long valid:1; /* Valid mapping */ + unsigned long table:1; /* == 1 in 4k map entries too */ + + unsigned long pad2:10; + + /* The base address must be appropriately aligned for Block entries */ + unsigned long long base:36; /* Base address of block or next table */ + + unsigned long pad1:16; +} lpae_walk_t; + +typedef union { + uint64_t bits; + lpae_pt_t pt; + lpae_p2m_t p2m; + lpae_walk_t walk; +} lpae_t; + +static inline bool lpae_is_valid(lpae_t pte) +{ + return pte.walk.valid; +} + +/* + * lpae_is_* don't check the valid bit. This gives an opportunity for the + * callers to operate on the entry even if they are not valid. For + * instance to store information in advance. + */ +static inline bool lpae_is_table(lpae_t pte, unsigned int level) +{ + return (level < 3) && pte.walk.table; +} + +static inline bool lpae_is_mapping(lpae_t pte, unsigned int level) +{ + if ( level == 3 ) + return pte.walk.table; + else + return !pte.walk.table; +} + +static inline bool lpae_is_superpage(lpae_t pte, unsigned int level) +{ + return (level < 3) && lpae_is_mapping(pte, level); +} + +#define lpae_get_mfn(pte) (_mfn((pte).walk.base)) +#define lpae_set_mfn(pte, mfn) ((pte).walk.base = mfn_x(mfn)) + +/* + * AArch64 supports pages with different sizes (4K, 16K, and 64K). + * Provide a set of generic helpers that will compute various + * information based on the page granularity. + * + * Note the parameter 'gs' is the page shift of the granularity used. 
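[ A short sketch of how the lpae_is_* predicates and lpae_get_mfn() combine when inspecting a single entry during a walk; the table pointer and level are assumed to come from the walker, and the body only annotates the cases: ]

    #include <asm/lpae.h>

    /*
     * Sketch: inspect one entry at 'level'. A valid entry is either a
     * pointer to the next-level table (levels 0-2 only) or a mapping;
     * for a mapping, lpae_get_mfn() recovers the base frame number.
     */
    static void inspect_entry(lpae_t pte, unsigned int level)
    {
        if ( !lpae_is_valid(pte) )
            return;                 /* hole in the address space */

        if ( lpae_is_table(pte, level) )
        {
            /* Descend: lpae_get_mfn(pte) is the next-level table. */
        }
        else if ( lpae_is_superpage(pte, level) )
        {
            /* Block mapping covering LEVEL_SIZE_GS(PAGE_SHIFT, level). */
        }
    }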
+ * Some macro will evaluate 'gs' twice rather than storing in a + * variable. This is to allow using the macros in assembly. + */ + +/* + * Granularity | PAGE_SHIFT | LPAE_SHIFT + * ------------------------------------- + * 4K | 12 | 9 + * 16K | 14 | 11 + * 64K | 16 | 13 + * + * This is equivalent to LPAE_SHIFT = PAGE_SHIFT - 3 + */ +#define LPAE_SHIFT_GS(gs) ((gs) - 3) +#define LPAE_ENTRIES_GS(gs) (_AC(1, U) << LPAE_SHIFT_GS(gs)) +#define LPAE_ENTRIES_MASK_GS(gs) (LPAE_ENTRIES_GS(gs) - 1) + +#define LEVEL_ORDER_GS(gs, lvl) ((3 - (lvl)) * LPAE_SHIFT_GS(gs)) +#define LEVEL_SHIFT_GS(gs, lvl) (LEVEL_ORDER_GS(gs, lvl) + (gs)) +#define LEVEL_SIZE_GS(gs, lvl) (_AT(paddr_t, 1) << LEVEL_SHIFT_GS(gs, lvl)) + +/* Offset in the table at level 'lvl' */ +#define LPAE_TABLE_INDEX_GS(gs, lvl, addr) \ + (((addr) >> LEVEL_SHIFT_GS(gs, lvl)) & LPAE_ENTRIES_MASK_GS(gs)) + +/* Generate an array @var containing the offset for each level from @addr */ +#define DECLARE_OFFSETS(var, addr) \ + const unsigned int var[4] = { \ + zeroeth_table_offset(addr), \ + first_table_offset(addr), \ + second_table_offset(addr), \ + third_table_offset(addr) \ + } + +#endif /* __ASSEMBLY__ */ + +/* + * These numbers add up to a 48-bit input address space. + * + * On 32-bit the zeroeth level does not exist, therefore the total is + * 39-bits. The ARMv7-A architecture actually specifies a 40-bit input + * address space for the p2m, with an 8K (1024-entry) top-level table. + * However Xen only supports 16GB of RAM on 32-bit ARM systems and + * therefore 39-bits are sufficient. + */ + +#define LPAE_SHIFT 9 +#define LPAE_ENTRIES (_AC(1,U) << LPAE_SHIFT) +#define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1) + +#define THIRD_SHIFT (PAGE_SHIFT) +#define THIRD_ORDER (THIRD_SHIFT - PAGE_SHIFT) +#define THIRD_SIZE (_AT(paddr_t, 1) << THIRD_SHIFT) +#define THIRD_MASK (~(THIRD_SIZE - 1)) +#define SECOND_SHIFT (THIRD_SHIFT + LPAE_SHIFT) +#define SECOND_ORDER (SECOND_SHIFT - PAGE_SHIFT) +#define SECOND_SIZE (_AT(paddr_t, 1) << SECOND_SHIFT) +#define SECOND_MASK (~(SECOND_SIZE - 1)) +#define FIRST_SHIFT (SECOND_SHIFT + LPAE_SHIFT) +#define FIRST_ORDER (FIRST_SHIFT - PAGE_SHIFT) +#define FIRST_SIZE (_AT(paddr_t, 1) << FIRST_SHIFT) +#define FIRST_MASK (~(FIRST_SIZE - 1)) +#define ZEROETH_SHIFT (FIRST_SHIFT + LPAE_SHIFT) +#define ZEROETH_ORDER (ZEROETH_SHIFT - PAGE_SHIFT) +#define ZEROETH_SIZE (_AT(paddr_t, 1) << ZEROETH_SHIFT) +#define ZEROETH_MASK (~(ZEROETH_SIZE - 1)) + +/* Calculate the offsets into the pagetables for a given VA */ +#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT) +#define first_linear_offset(va) ((va) >> FIRST_SHIFT) +#define second_linear_offset(va) ((va) >> SECOND_SHIFT) +#define third_linear_offset(va) ((va) >> THIRD_SHIFT) + +#define TABLE_OFFSET(offs) (_AT(unsigned int, offs) & LPAE_ENTRY_MASK) +#define first_table_offset(va) TABLE_OFFSET(first_linear_offset(va)) +#define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va)) +#define third_table_offset(va) TABLE_OFFSET(third_linear_offset(va)) +#define zeroeth_table_offset(va) TABLE_OFFSET(zeroeth_linear_offset(va)) + +#endif /* __ARM_LPAE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/macros.h b/xen/arch/arm/include/asm/macros.h new file mode 100644 index 0000000000..1aa373760f --- /dev/null +++ b/xen/arch/arm/include/asm/macros.h @@ -0,0 +1,32 @@ +#ifndef __ASM_MACROS_H +#define __ASM_MACROS_H + +#ifndef __ASSEMBLY__ +# 
error "This file should only be included in assembly file" +#endif + + /* + * Speculative barrier + * XXX: Add support for the 'sb' instruction + */ + .macro sb + dsb nsh + isb + .endm + +#if defined (CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + + /* NOP sequence */ + .macro nops, num + .rept \num + nop + .endr + .endm + +#endif /* __ASM_ARM_MACROS_H */ diff --git a/xen/arch/arm/include/asm/mem_access.h b/xen/arch/arm/include/asm/mem_access.h new file mode 100644 index 0000000000..35ed0ad154 --- /dev/null +++ b/xen/arch/arm/include/asm/mem_access.h @@ -0,0 +1,53 @@ +/* + * mem_access.h: architecture specific mem_access handling routines + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef _ASM_ARM_MEM_ACCESS_H +#define _ASM_ARM_MEM_ACCESS_H + +static inline +bool p2m_mem_access_emulate_check(struct vcpu *v, + const struct vm_event_st *rsp) +{ + /* Not supported on ARM. */ + return false; +} + +/* vm_event and mem_access are supported on any ARM guest */ +static inline bool p2m_mem_access_sanity_check(struct domain *d) +{ + return true; +} + +/* + * Send mem event based on the access. Boolean return value indicates if trap + * needs to be injected into guest. + */ +bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec); + +struct page_info* +p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, + const struct vcpu *v); + +#endif /* _ASM_ARM_MEM_ACCESS_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h new file mode 100644 index 0000000000..7b5e7b7f69 --- /dev/null +++ b/xen/arch/arm/include/asm/mm.h @@ -0,0 +1,373 @@ +#ifndef __ARCH_ARM_MM__ +#define __ARCH_ARM_MM__ + +#include +#include +#include +#include + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +/* Align Xen to a 2 MiB boundary. */ +#define XEN_PADDR_ALIGN (1 << 21) + +/* + * Per-page-frame information. + * + * Every architecture must ensure the following: + * 1. 'struct page_info' contains a 'struct page_list_entry list'. + * 2. Provide a PFN_ORDER() macro for accessing the order of a free page. + */ +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) + +struct page_info +{ + /* Each frame can be threaded onto a doubly-linked list. */ + struct page_list_entry list; + + /* Reference count and various PGC_xxx flags and fields. */ + unsigned long count_info; + + /* Context-dependent fields follow... */ + union { + /* Page is in use: ((count_info & PGC_count_mask) != 0). */ + struct { + /* Type reference count and various PGT_xxx flags and fields. */ + unsigned long type_info; + } inuse; + /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */ + union { + struct { + /* + * Index of the first *possibly* unscrubbed page in the buddy. 
+ * One more bit than maximum possible order to accommodate + * INVALID_DIRTY_IDX. + */ +#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) + unsigned long first_dirty:MAX_ORDER + 1; + + /* Do TLBs need flushing for safety before next page use? */ + bool need_tlbflush:1; + +#define BUDDY_NOT_SCRUBBING 0 +#define BUDDY_SCRUBBING 1 +#define BUDDY_SCRUB_ABORT 2 + unsigned long scrub_state:2; + }; + + unsigned long val; + } free; + + } u; + + union { + /* Page is in use, but not as a shadow. */ + struct { + /* Owner of this page (zero if page is anonymous). */ + struct domain *domain; + } inuse; + + /* Page is on a free list. */ + struct { + /* Order-size of the free chunk this page is the head of. */ + unsigned int order; + } free; + + } v; + + union { + /* + * Timestamp from 'TLB clock', used to avoid extra safety flushes. + * Only valid for: a) free pages, and b) pages with zero type count + */ + u32 tlbflush_timestamp; + }; + u64 pad; +}; + +#define PG_shift(idx) (BITS_PER_LONG - (idx)) +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) + +#define PGT_none PG_mask(0, 1) /* no special uses of this page */ +#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */ +#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */ + + /* Count of uses of this frame as its current type. */ +#define PGT_count_width PG_shift(2) +#define PGT_count_mask ((1UL<count_info&PGC_state) == PGC_state_##st) +/* Page is not reference counted */ +#define _PGC_extra PG_shift(10) +#define PGC_extra PG_mask(1, 10) + +/* Count of references to this frame. */ +#define PGC_count_width PG_shift(10) +#define PGC_count_mask ((1UL<= mfn_x(xenheap_mfn_start) && \ + mfn_ < mfn_x(xenheap_mfn_end)); \ +}) +#else +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) +#define is_xen_heap_mfn(mfn) \ + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) +#endif + +#define is_xen_fixed_mfn(mfn) \ + ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ + (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) + +#define page_get_owner(_p) (_p)->v.inuse.domain +#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) + +#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma)))) + +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) +/* PDX of the first page in the frame table. */ +extern unsigned long frametable_base_pdx; + +extern unsigned long max_page; +extern unsigned long total_pages; + +#define PDX_GROUP_SHIFT SECOND_SHIFT + +/* Boot-time pagetable setup */ +extern void setup_pagetables(unsigned long boot_phys_offset); +/* Map FDT in boot pagetable */ +extern void *early_fdt_map(paddr_t fdt_paddr); +/* Remove early mappings */ +extern void remove_early_mappings(void); +/* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the + * new page table */ +extern int init_secondary_pagetables(int cpu); +/* Switch secondary CPUS to its own pagetables and finalise MMU setup */ +extern void mmu_init_secondary_cpu(void); +/* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. + * Base must be 32MB aligned and size a multiple of 32MB. 
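[ A sketch of how the PGC_* fields above are typically consulted; no locking is shown, and real callers use get_page() to stabilise count_info before trusting it: ]

    #include <asm/mm.h>

    /*
     * Sketch: PGC_count_mask holds the reference count, so a zero
     * count means the page is on a free list and v.free is live;
     * page_get_owner() is only meaningful for in-use pages.
     */
    static bool page_is_free(const struct page_info *pg)
    {
        return (pg->count_info & PGC_count_mask) == 0;
    }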
*/ +extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns); +/* Map a frame table to cover physical addresses ps through pe */ +extern void setup_frametable_mappings(paddr_t ps, paddr_t pe); +/* Map a 4k page in a fixmap entry */ +extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes); +/* Remove a mapping from a fixmap entry */ +extern void clear_fixmap(unsigned map); +/* map a physical range in virtual memory */ +void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned attributes); + +static inline void __iomem *ioremap_nocache(paddr_t start, size_t len) +{ + return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE); +} + +static inline void __iomem *ioremap_cache(paddr_t start, size_t len) +{ + return ioremap_attr(start, len, PAGE_HYPERVISOR); +} + +static inline void __iomem *ioremap_wc(paddr_t start, size_t len) +{ + return ioremap_attr(start, len, PAGE_HYPERVISOR_WC); +} + +/* XXX -- account for base */ +#define mfn_valid(mfn) ({ \ + unsigned long __m_f_n = mfn_x(mfn); \ + likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \ +}) + +/* Convert between machine frame numbers and page-info structures. */ +#define mfn_to_page(mfn) \ + (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) +#define page_to_mfn(pg) \ + pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx) + +/* Convert between machine addresses and page-info structures. */ +#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma)) +#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg))) + +/* Convert between frame number and address formats. */ +#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT) +#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT)) +#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) +#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) +#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) +#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) +#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) +#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) + +/* Page-align address and convert to frame number format */ +#define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr)) + +static inline paddr_t __virt_to_maddr(vaddr_t va) +{ + uint64_t par = va_to_par(va); + return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK); +} +#define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va)) + +#ifdef CONFIG_ARM_32 +static inline void *maddr_to_virt(paddr_t ma) +{ + ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma))); + ma -= mfn_to_maddr(xenheap_mfn_start); + return (void *)(unsigned long) ma + XENHEAP_VIRT_START; +} +#else +static inline void *maddr_to_virt(paddr_t ma) +{ + ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) < + (DIRECTMAP_SIZE >> PAGE_SHIFT)); + return (void *)(XENHEAP_VIRT_START - + (xenheap_base_pdx << PAGE_SHIFT) + + ((ma & ma_va_bottom_mask) | + ((ma & ma_top_mask) >> pfn_pdx_hole_shift))); +} +#endif + +/* + * Translate a guest virtual address to a machine address. + * Return the fault information if the translation has failed else 0. + */ +static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa, + unsigned int flags) +{ + uint64_t par = gva_to_ma_par(va, flags); + if ( par & PAR_F ) + return par; + *pa = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK); + return 0; +} + +/* Convert between Xen-heap virtual addresses and machine addresses. 
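[ Since gvirt_to_maddr() returns the raw PAR on failure and 0 on success, callers test the return value rather than a boolean. A sketch; GV2M_READ is assumed to be the read-permission flag from the page/processor headers, not something mm.h itself defines: ]

    #include <asm/mm.h>

    /*
     * Sketch: translate a guest virtual address for a read access.
     * A non-zero return is the PAR with PAR_F set, i.e. the fault
     * information, which a real caller would propagate.
     */
    static bool guest_va_to_pa(vaddr_t va, paddr_t *pa)
    {
        uint64_t par = gvirt_to_maddr(va, pa, GV2M_READ);

        return par == 0;
    }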
*/ +#define __pa(x) (virt_to_maddr(x)) +#define __va(x) (maddr_to_virt(x)) + +/* Convert between Xen-heap virtual addresses and machine frame numbers. */ +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) +#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT)) + +/* + * We define non-underscored wrappers for above conversion functions. + * These are overriden in various source files while underscored version + * remain intact. + */ +#define virt_to_mfn(va) __virt_to_mfn(va) +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) + +/* Convert between Xen-heap virtual addresses and page-info structures. */ +static inline struct page_info *virt_to_page(const void *v) +{ + unsigned long va = (unsigned long)v; + unsigned long pdx; + + ASSERT(va >= XENHEAP_VIRT_START); + ASSERT(va < xenheap_virt_end); + + pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT; + pdx += mfn_to_pdx(xenheap_mfn_start); + return frame_table + pdx - frametable_base_pdx; +} + +static inline void *page_to_virt(const struct page_info *pg) +{ + return mfn_to_virt(mfn_x(page_to_mfn(pg))); +} + +struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, + unsigned long flags); + +/* + * Arm does not have an M2P, but common code expects a handful of + * M2P-related defines and functions. Provide dummy versions of these. + */ +#define INVALID_M2P_ENTRY (~0UL) +#define SHARED_M2P_ENTRY (~0UL - 1UL) +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) + +/* Xen always owns P2M on ARM */ +#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0) +#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) + +/* Arch-specific portion of memory_op hypercall. */ +long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg); + +#define domain_set_alloc_bitsize(d) ((void)0) +#define domain_clamp_alloc_bitsize(d, b) (b) + +unsigned long domain_get_maximum_gpfn(struct domain *d); + +#define memguard_guard_stack(_p) ((void)0) +#define memguard_guard_range(_p,_l) ((void)0) +#define memguard_unguard_range(_p,_l) ((void)0) + +/* Release all __init and __initdata ranges to be reused */ +void free_init_memory(void); + +int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, + unsigned int order); + +extern void put_page_type(struct page_info *page); +static inline void put_page_and_type(struct page_info *page) +{ + put_page_type(page); + put_page(page); +} + +void clear_and_clean_page(struct page_info *page); + +unsigned int arch_get_dma_bitsize(void); + +#endif /* __ARCH_ARM_MM__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/mmio.h b/xen/arch/arm/include/asm/mmio.h new file mode 100644 index 0000000000..7ab873cb8f --- /dev/null +++ b/xen/arch/arm/include/asm/mmio.h @@ -0,0 +1,86 @@ +/* + * xen/include/asm-arm/mmio.h + * + * ARM I/O handlers + * + * Copyright (c) 2011 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_ARM_MMIO_H__ +#define __ASM_ARM_MMIO_H__ + +#include +#include + +#include + +#define MAX_IO_HANDLER 16 + +typedef struct +{ + struct hsr_dabt dabt; + paddr_t gpa; +} mmio_info_t; + +enum io_state +{ + IO_ABORT, /* The IO was handled by the helper and led to an abort. */ + IO_HANDLED, /* The IO was successfully handled by the helper. */ + IO_UNHANDLED, /* The IO was not handled by the helper. */ + IO_RETRY, /* Retry the emulation for some reason */ +}; + +typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info, + register_t *r, void *priv); +typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info, + register_t r, void *priv); + +struct mmio_handler_ops { + mmio_read_t read; + mmio_write_t write; +}; + +struct mmio_handler { + paddr_t addr; + paddr_t size; + const struct mmio_handler_ops *ops; + void *priv; +}; + +struct vmmio { + int num_entries; + int max_num_entries; + rwlock_t lock; + struct mmio_handler *handlers; +}; + +enum io_state try_handle_mmio(struct cpu_user_regs *regs, + const union hsr hsr, + paddr_t gpa); +void register_mmio_handler(struct domain *d, + const struct mmio_handler_ops *ops, + paddr_t addr, paddr_t size, void *priv); +int domain_io_init(struct domain *d, int max_count); +void domain_io_free(struct domain *d); + + +#endif /* __ASM_ARM_MMIO_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/monitor.h b/xen/arch/arm/include/asm/monitor.h new file mode 100644 index 0000000000..7567be66bd --- /dev/null +++ b/xen/arch/arm/include/asm/monitor.h @@ -0,0 +1,68 @@ +/* + * include/asm-arm/monitor.h + * + * Arch-specific monitor_op domctl handler. + * + * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) + * Copyright (c) 2016, Bitdefender S.R.L. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + */ + +#ifndef __ASM_ARM_MONITOR_H__ +#define __ASM_ARM_MONITOR_H__ + +#include +#include + +static inline +void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace) +{ +} + +static inline +int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop) +{ + /* No arch-specific monitor ops on ARM. */ + return -EOPNOTSUPP; +} + +int arch_monitor_domctl_event(struct domain *d, + struct xen_domctl_monitor_op *mop); + +static inline +int arch_monitor_init_domain(struct domain *d) +{ + /* No arch-specific domain initialization on ARM. */ + return 0; +} + +static inline +void arch_monitor_cleanup_domain(struct domain *d) +{ + /* No arch-specific domain cleanup on ARM. 
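[ The ops/priv split above is how emulated devices hook into the dispatcher. A sketch of a trivial read-as-zero, write-ignore region; 'demo_base', 'demo_size' and the handler names are placeholders, and a return of 1 means the access was handled: ]

    #include <asm/mmio.h>

    static int demo_read(struct vcpu *v, mmio_info_t *info,
                         register_t *r, void *priv)
    {
        *r = 0;         /* every register reads as zero */
        return 1;
    }

    static int demo_write(struct vcpu *v, mmio_info_t *info,
                          register_t r, void *priv)
    {
        return 1;       /* writes are silently ignored */
    }

    static const struct mmio_handler_ops demo_ops = {
        .read  = demo_read,
        .write = demo_write,
    };

    /* At domain build time, with hypothetical demo_base/demo_size:
     *   register_mmio_handler(d, &demo_ops, demo_base, demo_size, NULL);
     */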
*/ +} + +static inline uint32_t arch_monitor_get_capabilities(struct domain *d) +{ + uint32_t capabilities = 0; + + capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST | + 1U << XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL); + + return capabilities; +} + +int monitor_smc(void); + +#endif /* __ASM_ARM_MONITOR_H__ */ diff --git a/xen/arch/arm/include/asm/new_vgic.h b/xen/arch/arm/include/asm/new_vgic.h new file mode 100644 index 0000000000..97d622bff6 --- /dev/null +++ b/xen/arch/arm/include/asm/new_vgic.h @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_ARM_NEW_VGIC_H +#define __ASM_ARM_NEW_VGIC_H + +#include +#include +#include +#include +#include + +#define VGIC_V3_MAX_CPUS 255 +#define VGIC_V2_MAX_CPUS 8 +#define VGIC_NR_SGIS 16 +#define VGIC_NR_PPIS 16 +#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) +#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1) +#define VGIC_MAX_SPI 1019 +#define VGIC_MAX_RESERVED 1023 +#define VGIC_MIN_LPI 8192 + +#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) +#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ + (irq) <= VGIC_MAX_SPI) + +enum vgic_type { + VGIC_V2, /* Good ol' GICv2 */ + VGIC_V3, /* New fancy GICv3 */ +}; + +#define VGIC_V2_MAX_LRS (1 << 6) +#define VGIC_V3_MAX_LRS 16 +#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + +#define VGIC_CONFIG_EDGE false +#define VGIC_CONFIG_LEVEL true + +struct vgic_irq { + struct list_head ap_list; + + struct vcpu *vcpu; /* + * SGIs and PPIs: The VCPU + * SPIs and LPIs: The VCPU whose ap_list + * this is queued on. + */ + + struct vcpu *target_vcpu; /* + * The VCPU that this interrupt should + * be sent to, as a result of the + * targets reg (v2) or the affinity reg (v3). + */ + + spinlock_t irq_lock; /* Protects the content of the struct */ + uint32_t intid; /* Guest visible INTID */ + atomic_t refcount; /* Used for LPIs */ + uint32_t hwintid; /* HW INTID number */ + union + { + struct { + uint8_t targets; /* GICv2 target VCPUs mask */ + uint8_t source; /* GICv2 SGIs only */ + }; + uint32_t mpidr; /* GICv3 target VCPU */ + }; + uint8_t priority; + bool line_level:1; /* Level only */ + bool pending_latch:1; /* + * The pending latch state used to + * calculate the pending state for both + * level and edge triggered IRQs. + */ + bool active:1; /* not used for LPIs */ + bool enabled:1; + bool hw:1; /* Tied to HW IRQ */ + bool config:1; /* Level or edge */ + struct list_head lpi_list; /* Used to link all LPIs together */ +}; + +enum iodev_type { + IODEV_DIST, + IODEV_REDIST, +}; + +struct vgic_io_device { + gfn_t base_fn; + struct vcpu *redist_vcpu; + const struct vgic_register_region *regions; + enum iodev_type iodev_type; + unsigned int nr_regions; +}; + +struct vgic_dist { + bool ready; + bool initialized; + + /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ + uint32_t version; + + /* Do injected MSIs require an additional device ID? 
*/ + bool msis_require_devid; + + unsigned int nr_spis; + + /* base addresses in guest physical address space: */ + paddr_t vgic_dist_base; /* distributor */ + union + { + /* either a GICv2 CPU interface */ + paddr_t vgic_cpu_base; + /* or a number of GICv3 redistributor regions */ + struct + { + paddr_t vgic_redist_base; + paddr_t vgic_redist_free_offset; + }; + }; + + /* distributor enabled */ + bool enabled; + + struct vgic_irq *spis; + unsigned long *allocated_irqs; /* bitmap of IRQs allocated */ + + struct vgic_io_device dist_iodev; + + bool has_its; + + /* + * Contains the attributes and gpa of the LPI configuration table. + * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share + * one address across all redistributors. + * GICv3 spec: 6.1.2 "LPI Configuration tables" + */ + uint64_t propbaser; + + /* Protects the lpi_list and the count value below. */ + spinlock_t lpi_list_lock; + struct list_head lpi_list_head; + unsigned int lpi_list_count; +}; + +struct vgic_cpu { + struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + + struct list_head ap_list_head; + spinlock_t ap_list_lock; /* Protects the ap_list */ + + unsigned int used_lrs; + + /* + * List of IRQs that this VCPU should consider because they are either + * Active or Pending (hence the name; AP list), or because they recently + * were one of the two and need to be migrated off this list to another + * VCPU. + */ + + /* + * Members below are used with GICv3 emulation only and represent + * parts of the redistributor. + */ + struct vgic_io_device rd_iodev; + struct vgic_io_device sgi_iodev; + + /* Contains the attributes and gpa of the LPI pending tables. */ + uint64_t pendbaser; + + bool lpis_enabled; + + /* Cache guest priority bits */ + uint32_t num_pri_bits; + + /* Cache guest interrupt ID bits */ + uint32_t num_id_bits; +}; + +#endif /* __ASM_ARM_NEW_VGIC_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/nospec.h b/xen/arch/arm/include/asm/nospec.h new file mode 100644 index 0000000000..51c7aea4f4 --- /dev/null +++ b/xen/arch/arm/include/asm/nospec.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. */ + +#ifndef _ASM_ARM_NOSPEC_H +#define _ASM_ARM_NOSPEC_H + +static inline bool evaluate_nospec(bool condition) +{ + return condition; +} + +static inline void block_speculation(void) +{ +} + +#endif /* _ASM_ARM_NOSPEC_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/numa.h b/xen/arch/arm/include/asm/numa.h new file mode 100644 index 0000000000..31a6de4e23 --- /dev/null +++ b/xen/arch/arm/include/asm/numa.h @@ -0,0 +1,36 @@ +#ifndef __ARCH_ARM_NUMA_H +#define __ARCH_ARM_NUMA_H + +#include + +typedef u8 nodeid_t; + +/* Fake one node for now. See also node_online_map. */ +#define cpu_to_node(cpu) 0 +#define node_to_cpumask(node) (cpu_online_map) + +static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr) +{ + return 0; +} + +/* + * TODO: make first_valid_mfn static when NUMA is supported on Arm, this + * is required because the dummy helpers are using it. 
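[ The nospec stubs above exist so that common code stays portable. A sketch of the usual bounds-check idiom they support, with a hypothetical table and index; on x86 evaluate_nospec() hardens the branch against speculation, while on Arm it is currently a plain evaluation of the condition: ]

    #include <asm/nospec.h>

    /*
     * Sketch: common-code style array access guarded by
     * evaluate_nospec(), so the body is only reached for an
     * architecturally (and, on x86, speculatively) valid index.
     */
    static int lookup(const int *table, unsigned int idx, unsigned int len)
    {
        if ( !evaluate_nospec(idx < len) )
            return -1;

        return table[idx];
    }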
+ */ +extern mfn_t first_valid_mfn; + +/* XXX: implement NUMA support */ +#define node_spanned_pages(nid) (max_page - mfn_x(first_valid_mfn)) +#define node_start_pfn(nid) (mfn_x(first_valid_mfn)) +#define __node_distance(a, b) (20) + +#endif /* __ARCH_ARM_NUMA_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h new file mode 100644 index 0000000000..8f11d9c97b --- /dev/null +++ b/xen/arch/arm/include/asm/p2m.h @@ -0,0 +1,439 @@ +#ifndef _XEN_P2M_H +#define _XEN_P2M_H + +#include +#include +#include +#include + +#include +#include + +#define paddr_bits PADDR_BITS + +/* Holds the bit size of IPAs in p2m tables. */ +extern unsigned int p2m_ipa_bits; + +#ifdef CONFIG_ARM_64 +extern unsigned int p2m_root_order; +extern unsigned int p2m_root_level; +#define P2M_ROOT_ORDER p2m_root_order +#define P2M_ROOT_LEVEL p2m_root_level +#else +/* First level P2M is always 2 consecutive pages */ +#define P2M_ROOT_ORDER 1 +#define P2M_ROOT_LEVEL 1 +#endif + +struct domain; + +extern void memory_type_changed(struct domain *); + +/* Per-p2m-table state */ +struct p2m_domain { + /* + * Lock that protects updates to the p2m. + */ + rwlock_t lock; + + /* Pages used to construct the p2m */ + struct page_list_head pages; + + /* The root of the p2m tree. May be concatenated */ + struct page_info *root; + + /* Current VMID in use */ + uint16_t vmid; + + /* Current Translation Table Base Register for the p2m */ + uint64_t vttbr; + + /* Highest guest frame that's ever been mapped in the p2m */ + gfn_t max_mapped_gfn; + + /* + * Lowest mapped gfn in the p2m. When releasing mapped gfn's in a + * preemptible manner this is update to track recall where to + * resume the search. Apart from during teardown this can only + * decrease. */ + gfn_t lowest_mapped_gfn; + + /* Indicate if it is required to clean the cache when writing an entry */ + bool clean_pte; + + /* + * P2M updates may required TLBs to be flushed (invalidated). + * + * Flushes may be deferred by setting 'need_flush' and then flushing + * when the p2m write lock is released. + * + * If an immediate flush is required (e.g, if a super page is + * shattered), call p2m_tlb_flush_sync(). + */ + bool need_flush; + + /* Gather some statistics for information purposes only */ + struct { + /* Number of mappings at each p2m tree level */ + unsigned long mappings[4]; + /* Number of times we have shattered a mapping + * at each p2m tree level. */ + unsigned long shattered[4]; + } stats; + + /* + * If true, and an access fault comes in and there is no vm_event listener, + * pause domain. Otherwise, remove access restrictions. + */ + bool access_required; + + /* Defines if mem_access is in use for the domain. */ + bool mem_access_enabled; + + /* + * Default P2M access type for each page in the the domain: new pages, + * swapped in pages, cleared pages, and pages that are ambiguously + * retyped get this access type. See definition of p2m_access_t. + */ + p2m_access_t default_access; + + /* + * Radix tree to store the p2m_access_t settings as the pte's don't have + * enough available bits to store this information. + */ + struct radix_tree_root mem_access_settings; + + /* back pointer to domain */ + struct domain *domain; + + /* Keeping track on which CPU this p2m was used and for which vCPU */ + uint8_t last_vcpu_ran[NR_CPUS]; +}; + +/* + * List of possible type for each page in the p2m entry. 
+ * There are 4 bits available per page in the PTE for this purpose, so only + * 16 types can be encoded. If we run out of values in the future, higher + * values can be used for pseudo-types that are not stored in the p2m entry. + */ +typedef enum { + p2m_invalid = 0, /* Nothing mapped here */ + p2m_ram_rw, /* Normal read/write guest RAM */ + p2m_ram_ro, /* Read-only; writes are silently dropped */ + p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */ + p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */ + p2m_mmio_direct_c, /* Read/write mapping of genuine MMIO area cacheable */ + p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */ + p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */ + p2m_grant_map_rw, /* Read/write grant mapping */ + p2m_grant_map_ro, /* Read-only grant mapping */ + /* The types below are only used to decide the page attribute in the P2M */ + p2m_iommu_map_rw, /* Read/write iommu mapping */ + p2m_iommu_map_ro, /* Read-only iommu mapping */ + p2m_max_real_type, /* Types after this won't be stored in the p2m */ +} p2m_type_t; + +/* We use bitmaps and masks to handle groups of types */ +#define p2m_to_mask(_t) (1UL << (_t)) + +/* RAM types, which map to real machine frames */ +#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) | \ + p2m_to_mask(p2m_ram_ro)) + +/* Grant mapping types, which map to a real frame in another VM */ +#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) | \ + p2m_to_mask(p2m_grant_map_ro)) + +/* Foreign mapping types */ +#define P2M_FOREIGN_TYPES (p2m_to_mask(p2m_map_foreign_rw) | \ + p2m_to_mask(p2m_map_foreign_ro)) + +/* Useful predicates */ +#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES) +#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES) +#define p2m_is_any_ram(_t) (p2m_to_mask(_t) & \ + (P2M_RAM_TYPES | P2M_GRANT_TYPES | \ + P2M_FOREIGN_TYPES)) + +/* All common type definitions should live ahead of this inclusion. */ +#ifdef _XEN_P2M_COMMON_H +# error "xen/p2m-common.h should not be included directly" +#endif +#include <xen/p2m-common.h> + +static inline bool arch_acquire_resource_check(struct domain *d) +{ + /* + * The reference counting of foreign entries in set_foreign_p2m_entry() + * is supported on Arm. + */ + return true; +} + +static inline +void p2m_altp2m_check(struct vcpu *v, uint16_t idx) +{ + /* Not supported on ARM. */ +} + +/* + * Helper to restrict "p2m_ipa_bits" according to the external entity + * (e.g. IOMMU) requirements. + * + * Each corresponding driver should report the maximum IPA bits + * (Stage-2 input size) it can support. + */ +void p2m_restrict_ipa_bits(unsigned int ipa_bits); + +/* Second stage paging setup, to be called on all CPUs */ +void setup_virt_paging(void); + +/* Initialise the data structures for later use by the p2m code */ +int p2m_init(struct domain *d); + +/* Return all the p2m resources to Xen.
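+ * + * (Editor's note, an assumption rather than something stated by the patch: + * this is expected to run on the domain destruction path, once the domain + * no longer executes, so it does not have to cope with concurrent p2m + * lookups.)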
*/ +void p2m_teardown(struct domain *d); + +/* + * Remove the mapping refcount on each mapped page in the p2m + * + * TODO: For the moment only foreign mappings are handled + */ +int relinquish_p2m_mapping(struct domain *d); + +/* Context switch */ +void p2m_save_state(struct vcpu *p); +void p2m_restore_state(struct vcpu *n); + +/* Print debugging/statistical info about a domain's p2m */ +void p2m_dump_info(struct domain *d); + +static inline void p2m_write_lock(struct p2m_domain *p2m) +{ + write_lock(&p2m->lock); +} + +void p2m_write_unlock(struct p2m_domain *p2m); + +static inline void p2m_read_lock(struct p2m_domain *p2m) +{ + read_lock(&p2m->lock); +} + +static inline void p2m_read_unlock(struct p2m_domain *p2m) +{ + read_unlock(&p2m->lock); +} + +static inline int p2m_is_locked(struct p2m_domain *p2m) +{ + return rw_is_locked(&p2m->lock); +} + +static inline int p2m_is_write_locked(struct p2m_domain *p2m) +{ + return rw_is_write_locked(&p2m->lock); +} + +void p2m_tlb_flush_sync(struct p2m_domain *p2m); + +/* Look up the MFN corresponding to a domain's GFN. */ +mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t); + +/* + * Get details of a given gfn. + * The P2M lock should be taken by the caller. + */ +mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, + p2m_type_t *t, p2m_access_t *a, + unsigned int *page_order, + bool *valid); + +/* + * Directly set a p2m entry: only for use by the P2M code. + * The P2M write lock should be taken. + */ +int p2m_set_entry(struct p2m_domain *p2m, + gfn_t sgfn, + unsigned long nr, + mfn_t smfn, + p2m_type_t t, + p2m_access_t a); + +bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn); + +void p2m_invalidate_root(struct p2m_domain *p2m); + +/* + * Clean & invalidate caches corresponding to a region [start,end) of guest + * address space. + * + * start will get updated if the function is preempted. + */ +int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end); + +void p2m_set_way_flush(struct vcpu *v, struct cpu_user_regs *regs, + const union hsr hsr); + +void p2m_toggle_cache(struct vcpu *v, bool was_enabled); + +void p2m_flush_vm(struct vcpu *v); + +/* + * Map a region in the guest p2m with a specific p2m type. + * The memory attributes will be derived from the p2m type. + */ +int map_regions_p2mt(struct domain *d, + gfn_t gfn, + unsigned long nr, + mfn_t mfn, + p2m_type_t p2mt); + +int unmap_regions_p2mt(struct domain *d, + gfn_t gfn, + unsigned long nr, + mfn_t mfn); + +int map_dev_mmio_region(struct domain *d, + gfn_t gfn, + unsigned long nr, + mfn_t mfn); + +int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr, + mfn_t mfn, p2m_type_t t); + +int guest_physmap_add_entry(struct domain *d, + gfn_t gfn, + mfn_t mfn, + unsigned long page_order, + p2m_type_t t); + +/* Untyped version for RAM only, for compatibility */ +static inline int __must_check +guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int page_order) +{ + return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw); +} + +static inline int guest_physmap_add_pages(struct domain *d, + gfn_t gfn, + mfn_t mfn, + unsigned int nr_pages) +{ + return p2m_insert_mapping(d, gfn, nr_pages, mfn, p2m_ram_rw); +} + +mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn); + +/* Look up a GFN and take a reference count on the backing page.
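+ * + * Typical caller pattern (editor's sketch; d and gfn are assumed to be + * supplied by the caller): + * + *     p2m_type_t t; + *     struct page_info *pg = get_page_from_gfn(d, gfn, &t, P2M_ALLOC); + * + *     if ( pg ) + *     { + *         ... use the page ... + *         put_page(pg); + *     } + * + * Note that the Arm implementation below currently ignores the query + * argument.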
*/ +typedef unsigned int p2m_query_t; +#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */ +#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */ + +struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn, + p2m_type_t *t); + +static inline struct page_info *get_page_from_gfn( + struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) +{ + mfn_t mfn; + p2m_type_t _t; + struct page_info *page; + + /* + * Special case for DOMID_XEN as it is the only domain so far that is + * not auto-translated. + */ + if ( likely(d != dom_xen) ) + return p2m_get_page_from_gfn(d, _gfn(gfn), t); + + if ( !t ) + t = &_t; + + *t = p2m_invalid; + + /* + * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the + * page. + */ + mfn = _mfn(gfn); + page = mfn_to_page(mfn); + + if ( !mfn_valid(mfn) || !get_page(page, d) ) + return NULL; + + if ( page->u.inuse.type_info & PGT_writable_page ) + *t = p2m_ram_rw; + else + *t = p2m_ram_ro; + + return page; +} + +int get_page_type(struct page_info *page, unsigned long type); +bool is_iomem_page(mfn_t mfn); +static inline int get_page_and_type(struct page_info *page, + struct domain *domain, + unsigned long type) +{ + int rc = get_page(page, domain); + + if ( likely(rc) && unlikely(!get_page_type(page, type)) ) + { + put_page(page); + rc = 0; + } + + return rc; +} + +/* get host p2m table */ +#define p2m_get_hostp2m(d) (&(d)->arch.p2m) + +static inline bool p2m_vm_event_sanity_check(struct domain *d) +{ + return true; +} + +/* + * Return the start of the next mapping based on the order of the + * current one. + */ +static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order) +{ + /* + * The order corresponds to the order of the mapping (or invalid + * range) in the page table. So we need to align the GFN before + * incrementing. + */ + gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1)); + + return gfn_add(gfn, 1UL << order); +} + +/* + * A vCPU has cache enabled only when the MMU is enabled and data cache + * is enabled. + */ +static inline bool vcpu_has_cache_enabled(struct vcpu *v) +{ + const register_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M; + + /* Only works with the current vCPU */ + ASSERT(current == v); + + return (READ_SYSREG(SCTLR_EL1) & mask) == mask; +} + +#endif /* _XEN_P2M_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/page-bits.h b/xen/arch/arm/include/asm/page-bits.h new file mode 100644 index 0000000000..5d6477e599 --- /dev/null +++ b/xen/arch/arm/include/asm/page-bits.h @@ -0,0 +1,12 @@ +#ifndef __ARM_PAGE_SHIFT_H__ +#define __ARM_PAGE_SHIFT_H__ + +#define PAGE_SHIFT 12 + +#ifdef CONFIG_ARM_64 +#define PADDR_BITS 48 +#else +#define PADDR_BITS 40 +#endif + +#endif /* __ARM_PAGE_SHIFT_H__ */ diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h new file mode 100644 index 0000000000..c6f9fb0d4e --- /dev/null +++ b/xen/arch/arm/include/asm/page.h @@ -0,0 +1,293 @@ +#ifndef __ARM_PAGE_H__ +#define __ARM_PAGE_H__ + +#include +#include +#include +#include +#include + +/* Shareability values for the LPAE entries */ +#define LPAE_SH_NON_SHAREABLE 0x0 +#define LPAE_SH_UNPREDICTALE 0x1 +#define LPAE_SH_OUTER 0x2 +#define LPAE_SH_INNER 0x3 + +/* + * Attribute Indexes. + * + * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page + * table entry. They are indexes into the bytes of the MAIR* + * registers, as defined below. 
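+ * + * Worked example (editor's addition): MT_NORMAL is index 7, i.e. byte 7 of + * the MAIR0/MAIR1 pair; MAIR1VAL below sets that byte to 0xff (Normal memory, + * Write-Back, Read/Write-allocate), so a stage 1 PTE with AttrIndx == 0b111 + * gets normal cacheable attributes.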
+ * + */ +#define MT_DEVICE_nGnRnE 0x0 +#define MT_NORMAL_NC 0x1 +#define MT_NORMAL_WT 0x2 +#define MT_NORMAL_WB 0x3 +#define MT_DEVICE_nGnRE 0x4 +#define MT_NORMAL 0x7 + +/* + * LPAE Memory region attributes. Indexed by the AttrIndex bits of a + * LPAE entry; the 8-bit fields are packed little-endian into MAIR0 and MAIR1. + * + * See section "Device memory" B2.7.2 in ARM DDI 0487B.a for more + * details about the meaning of *G*R*E. + * + * ai encoding + * MT_DEVICE_nGnRnE 000 0000 0000 -- Strongly Ordered/Device nGnRnE + * MT_NORMAL_NC 001 0100 0100 -- Non-Cacheable + * MT_NORMAL_WT 010 1010 1010 -- Write-through + * MT_NORMAL_WB 011 1110 1110 -- Write-back + * MT_DEVICE_nGnRE 100 0000 0100 -- Device nGnRE + * ?? 101 + * reserved 110 + * MT_NORMAL 111 1111 1111 -- Write-back write-allocate + * + * /!\ It is not possible to combine the definition in MAIRVAL and then + * split because it would result to a 64-bit value that some assembler + * doesn't understand. + */ +#define _MAIR0(attr, mt) (_AC(attr, ULL) << ((mt) * 8)) +#define _MAIR1(attr, mt) (_AC(attr, ULL) << (((mt) * 8) - 32)) + +#define MAIR0VAL (_MAIR0(0x00, MT_DEVICE_nGnRnE)| \ + _MAIR0(0x44, MT_NORMAL_NC) | \ + _MAIR0(0xaa, MT_NORMAL_WT) | \ + _MAIR0(0xee, MT_NORMAL_WB)) + +#define MAIR1VAL (_MAIR1(0x04, MT_DEVICE_nGnRE) | \ + _MAIR1(0xff, MT_NORMAL)) + +#define MAIRVAL (MAIR1VAL << 32 | MAIR0VAL) + +/* + * Layout of the flags used for updating the hypervisor page tables + * + * [0:2] Memory Attribute Index + * [3:4] Permission flags + * [5] Page present + * [6] Only populate page tables + */ +#define PAGE_AI_MASK(x) ((x) & 0x7U) + +#define _PAGE_XN_BIT 3 +#define _PAGE_RO_BIT 4 +#define _PAGE_XN (1U << _PAGE_XN_BIT) +#define _PAGE_RO (1U << _PAGE_RO_BIT) +#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U) +#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U) + +#define _PAGE_PRESENT (1U << 5) +#define _PAGE_POPULATE (1U << 6) + +/* + * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not + * meant to be used outside of this header. + */ +#define _PAGE_DEVICE (_PAGE_XN|_PAGE_PRESENT) +#define _PAGE_NORMAL (MT_NORMAL|_PAGE_PRESENT) + +#define PAGE_HYPERVISOR_RO (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN) +#define PAGE_HYPERVISOR_RX (_PAGE_NORMAL|_PAGE_RO) +#define PAGE_HYPERVISOR_RW (_PAGE_NORMAL|_PAGE_XN) + +#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW +#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE|MT_DEVICE_nGnRE) +#define PAGE_HYPERVISOR_WC (_PAGE_DEVICE|MT_NORMAL_NC) + +/* + * Stage 2 Memory Type. + * + * These are valid in the MemAttr[3:0] field of an LPAE stage 2 page + * table entry. + * + */ +#define MATTR_DEV 0x1 +#define MATTR_MEM_NC 0x5 +#define MATTR_MEM 0xf + +/* Flags for get_page_from_gva, gvirt_to_maddr etc */ +#define GV2M_READ (0u<<0) +#define GV2M_WRITE (1u<<0) +#define GV2M_EXEC (1u<<1) + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +/* Architectural minimum cacheline size is 4 32-bit words. */ +#define MIN_CACHELINE_BYTES 16 +/* Min dcache line size on the boot CPU. */ +extern size_t dcache_line_bytes; + +#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE) + +static inline size_t read_dcache_line_bytes(void) +{ + register_t ctr; + + /* Read CTR */ + ctr = READ_SYSREG(CTR_EL0); + + /* Bits 16-19 are the log2 number of words in the cacheline. */ + return (size_t) (4 << ((ctr >> 16) & 0xf)); +} + +/* Functions for flushing medium-sized areas. 
+ * if 'range' is large enough we might want to use model-specific + * full-cache flushes. */ + +static inline int invalidate_dcache_va_range(const void *p, unsigned long size) +{ + const void *end = p + size; + size_t cacheline_mask = dcache_line_bytes - 1; + + dsb(sy); /* So the CPU issues all writes to the range */ + + if ( (uintptr_t)p & cacheline_mask ) + { + p = (void *)((uintptr_t)p & ~cacheline_mask); + asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p)); + p += dcache_line_bytes; + } + if ( (uintptr_t)end & cacheline_mask ) + { + end = (void *)((uintptr_t)end & ~cacheline_mask); + asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end)); + } + + for ( ; p < end; p += dcache_line_bytes ) + asm volatile (__invalidate_dcache_one(0) : : "r" (p)); + + dsb(sy); /* So we know the flushes happen before continuing */ + + return 0; +} + +static inline int clean_dcache_va_range(const void *p, unsigned long size) +{ + const void *end = p + size; + dsb(sy); /* So the CPU issues all writes to the range */ + p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1)); + for ( ; p < end; p += dcache_line_bytes ) + asm volatile (__clean_dcache_one(0) : : "r" (p)); + dsb(sy); /* So we know the flushes happen before continuing */ + /* ARM callers assume that dcache_* functions cannot fail. */ + return 0; +} + +static inline int clean_and_invalidate_dcache_va_range + (const void *p, unsigned long size) +{ + const void *end = p + size; + dsb(sy); /* So the CPU issues all writes to the range */ + p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1)); + for ( ; p < end; p += dcache_line_bytes ) + asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p)); + dsb(sy); /* So we know the flushes happen before continuing */ + /* ARM callers assume that dcache_* functions cannot fail. */ + return 0; +} + +/* Macros for flushing a single small item. The predicate is always + * compile-time constant so this will compile down to 3 instructions in + * the common case. */ +#define clean_dcache(x) do { \ + typeof(x) *_p = &(x); \ + if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \ + clean_dcache_va_range(_p, sizeof(x)); \ + else \ + asm volatile ( \ + "dsb sy;" /* Finish all earlier writes */ \ + __clean_dcache_one(0) \ + "dsb sy;" /* Finish flush before continuing */ \ + : : "r" (_p), "m" (*_p)); \ +} while (0) + +#define clean_and_invalidate_dcache(x) do { \ + typeof(x) *_p = &(x); \ + if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \ + clean_and_invalidate_dcache_va_range(_p, sizeof(x)); \ + else \ + asm volatile ( \ + "dsb sy;" /* Finish all earlier writes */ \ + __clean_and_invalidate_dcache_one(0) \ + "dsb sy;" /* Finish flush before continuing */ \ + : : "r" (_p), "m" (*_p)); \ +} while (0) + +/* Flush the dcache for an entire page. */ +void flush_page_to_ram(unsigned long mfn, bool sync_icache); + +/* + * Print a walk of a page table or p2m + * + * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2) + * addr is the PA or IPA to translate + * root_level is the starting level of the page table + * (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0 ) + * nr_root_tables is the number of concatenated tables at the root. + * this can only be != 1 for P2M walks starting at the first or + * subsequent level. + */ +void dump_pt_walk(paddr_t ttbr, paddr_t addr, + unsigned int root_level, + unsigned int nr_root_tables); + +/* Print a walk of the hypervisor's page tables for a virtual addr. 
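+ * + * Intended use (editor's note, mirroring va_to_par() further down): + * + *     if ( par & PAR_F ) + *     { + *         dump_hyp_walk(va); + *         panic_PAR(par); + *     } + * + * i.e. dump the failing walk before panicking when a hypervisor VA + * unexpectedly fails to translate.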
*/ +extern void dump_hyp_walk(vaddr_t addr); +/* Print a walk of the p2m for a domain for a physical address. */ +extern void dump_p2m_lookup(struct domain *d, paddr_t addr); + +static inline uint64_t va_to_par(vaddr_t va) +{ + uint64_t par = __va_to_par(va); + /* It is not OK to call this with an invalid VA */ + if ( par & PAR_F ) + { + dump_hyp_walk(va); + panic_PAR(par); + } + return par; +} + +static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags) +{ + uint64_t par = gva_to_ipa_par(va, flags); + if ( par & PAR_F ) + return -EFAULT; + *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK); + return 0; +} + +/* Bits in the PAR returned by va_to_par */ +#define PAR_FAULT 0x1 + +#endif /* __ASSEMBLY__ */ + +#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) + +#endif /* __ARM_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/paging.h b/xen/arch/arm/include/asm/paging.h new file mode 100644 index 0000000000..6d1a000246 --- /dev/null +++ b/xen/arch/arm/include/asm/paging.h @@ -0,0 +1,16 @@ +#ifndef _XEN_PAGING_H +#define _XEN_PAGING_H + +#define paging_mode_translate(d) (1) +#define paging_mode_external(d) (1) + +#endif /* _XEN_PAGING_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/pci.h b/xen/arch/arm/include/asm/pci.h new file mode 100644 index 0000000000..9736d6816d --- /dev/null +++ b/xen/arch/arm/include/asm/pci.h @@ -0,0 +1,133 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_PCI_H__ +#define __ARM_PCI_H__ + +#ifdef CONFIG_HAS_PCI + +#define pci_to_dev(pcidev) (&(pcidev)->arch.dev) + +extern bool pci_passthrough_enabled; + +/* Arch pci dev struct */ +struct arch_pci_dev { + struct device dev; +}; + +/* Arch-specific MSI data for vPCI. */ +struct vpci_arch_msi { +}; + +/* Arch-specific MSI-X entry data for vPCI. */ +struct vpci_arch_msix_entry { +}; + +/* + * Because of header cross-dependencies (e.g. we need both struct pci_dev and + * struct arch_pci_dev at the same time), this cannot be done with an inline + * here. A macro could be implemented, but would look scary. + */ +struct pci_dev *dev_to_pci(struct device *dev); + +/* + * struct to hold the mappings of a config space window. This + * is expected to be used as sysdata for PCI controllers that + * use ECAM. + */ +struct pci_config_window { + paddr_t phys_addr; + paddr_t size; + uint8_t busn_start; + uint8_t busn_end; + void __iomem *win; +}; + +/* + * struct to hold pci host bridge information + * for a PCI controller.
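+ * + * A config space access flows through the ops table (editor's sketch, based + * on the pci_generic_config_read()/pci_ecam_map_bus() declarations below; + * the exact body is an assumption, not code from this patch): + * + *     void __iomem *addr = bridge->ops->map_bus(bridge, sbdf, reg); + * + *     if ( addr ) + *         *value = readl(addr); + * + * i.e. map_bus() resolves the SBDF to a location inside the ECAM window and + * the read/write hooks then perform the sized MMIO access.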
+ */ +struct pci_host_bridge { + struct dt_device_node *dt_node; /* Pointer to the associated DT node */ + struct list_head node; /* Node in list of host bridges */ + uint16_t segment; /* Segment number */ + struct pci_config_window* cfg; /* Pointer to the bridge config window */ + const struct pci_ops *ops; +}; + +struct pci_ops { + void __iomem *(*map_bus)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, + uint32_t offset); + int (*read)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, + uint32_t reg, uint32_t len, uint32_t *value); + int (*write)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, + uint32_t reg, uint32_t len, uint32_t value); +}; + +/* + * struct to hold pci ops and bus shift of the config window + * for a PCI controller. + */ +struct pci_ecam_ops { + unsigned int bus_shift; + struct pci_ops pci_ops; + int (*cfg_reg_index)(struct dt_device_node *dev); + int (*init)(struct pci_config_window *); +}; + +/* Default ECAM ops */ +extern const struct pci_ecam_ops pci_generic_ecam_ops; + +int pci_host_common_probe(struct dt_device_node *dev, + const struct pci_ecam_ops *ops); +int pci_generic_config_read(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, + uint32_t reg, uint32_t len, uint32_t *value); +int pci_generic_config_write(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, + uint32_t reg, uint32_t len, uint32_t value); +void __iomem *pci_ecam_map_bus(struct pci_host_bridge *bridge, + pci_sbdf_t sbdf, uint32_t where); +struct pci_host_bridge *pci_find_host_bridge(uint16_t segment, uint8_t bus); +struct dt_device_node *pci_find_host_bridge_node(struct device *dev); +int pci_get_host_bridge_segment(const struct dt_device_node *node, + uint16_t *segment); + +static always_inline bool is_pci_passthrough_enabled(void) +{ + return pci_passthrough_enabled; +} + +void arch_pci_init_pdev(struct pci_dev *pdev); + +#else /*!CONFIG_HAS_PCI*/ + +struct arch_pci_dev { }; + +static always_inline bool is_pci_passthrough_enabled(void) +{ + return false; +} + +struct pci_dev; + +static inline void arch_pci_init_pdev(struct pci_dev *pdev) {} + +static inline int pci_get_host_bridge_segment(const struct dt_device_node *node, + uint16_t *segment) +{ + ASSERT_UNREACHABLE(); + return -EINVAL; +} + +#endif /*!CONFIG_HAS_PCI*/ +#endif /* __ARM_PCI_H__ */ diff --git a/xen/arch/arm/include/asm/percpu.h b/xen/arch/arm/include/asm/percpu.h new file mode 100644 index 0000000000..f1a8768080 --- /dev/null +++ b/xen/arch/arm/include/asm/percpu.h @@ -0,0 +1,33 @@ +#ifndef __ARM_PERCPU_H__ +#define __ARM_PERCPU_H__ + +#ifndef __ASSEMBLY__ + +#include +#include + +extern char __per_cpu_start[], __per_cpu_data_end[]; +extern unsigned long __per_cpu_offset[NR_CPUS]; +void percpu_init_areas(void); + +#define per_cpu(var, cpu) \ + (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) +#define this_cpu(var) \ + (*RELOC_HIDE(&per_cpu__##var, READ_SYSREG(TPIDR_EL2))) + +#define per_cpu_ptr(var, cpu) \ + (*RELOC_HIDE(var, __per_cpu_offset[cpu])) +#define this_cpu_ptr(var) \ + (*RELOC_HIDE(var, READ_SYSREG(TPIDR_EL2))) + +#endif + +#endif /* __ARM_PERCPU_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/perfc.h b/xen/arch/arm/include/asm/perfc.h new file mode 100644 index 0000000000..95c4b2b6b7 --- /dev/null +++ b/xen/arch/arm/include/asm/perfc.h @@ -0,0 +1,21 @@ +#ifndef __ASM_PERFC_H__ +#define __ASM_PERFC_H__ + +static inline void arch_perfc_reset(void) +{ +} + +static inline void 
arch_perfc_gather(void) +{ +} + +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/perfc_defn.h b/xen/arch/arm/include/asm/perfc_defn.h new file mode 100644 index 0000000000..31f071222b --- /dev/null +++ b/xen/arch/arm/include/asm/perfc_defn.h @@ -0,0 +1,89 @@ +/* This file is legitimately included multiple times. */ +/*#ifndef __XEN_PERFC_DEFN_H__*/ +/*#define __XEN_PERFC_DEFN_H__*/ + +PERFCOUNTER(invalid_hypercalls, "invalid hypercalls") + +PERFCOUNTER(trap_wfi, "trap: wfi") +PERFCOUNTER(trap_wfe, "trap: wfe") +PERFCOUNTER(trap_cp15_32, "trap: cp15 32-bit access") +PERFCOUNTER(trap_cp15_64, "trap: cp15 64-bit access") +PERFCOUNTER(trap_cp14_32, "trap: cp14 32-bit access") +PERFCOUNTER(trap_cp14_64, "trap: cp14 64-bit access") +PERFCOUNTER(trap_cp14_dbg, "trap: cp14 dbg access") +PERFCOUNTER(trap_cp10, "trap: cp10 access") +PERFCOUNTER(trap_cp, "trap: cp access") +PERFCOUNTER(trap_smc32, "trap: 32-bit smc") +PERFCOUNTER(trap_hvc32, "trap: 32-bit hvc") +#ifdef CONFIG_ARM_64 +PERFCOUNTER(trap_smc64, "trap: 64-bit smc") +PERFCOUNTER(trap_hvc64, "trap: 64-bit hvc") +PERFCOUNTER(trap_sysreg, "trap: sysreg access") +#endif +PERFCOUNTER(trap_iabt, "trap: guest instr abort") +PERFCOUNTER(trap_dabt, "trap: guest data abort") +PERFCOUNTER(trap_uncond, "trap: condition failed") + +PERFCOUNTER(vpsci_cpu_on, "vpsci: cpu_on") +PERFCOUNTER(vpsci_cpu_off, "vpsci: cpu_off") +PERFCOUNTER(vpsci_version, "vpsci: version") +PERFCOUNTER(vpsci_migrate_info_type, "vpsci: migrate_info_type") +PERFCOUNTER(vpsci_system_off, "vpsci: system_off") +PERFCOUNTER(vpsci_system_reset, "vpsci: system_reset") +PERFCOUNTER(vpsci_cpu_suspend, "vpsci: cpu_suspend") +PERFCOUNTER(vpsci_cpu_affinity_info, "vpsci: cpu_affinity_info") +PERFCOUNTER(vpsci_features, "vpsci: features") + +PERFCOUNTER(vcpu_kick, "vcpu: notify other vcpu") + +PERFCOUNTER(vgicd_reads, "vgicd: read") +PERFCOUNTER(vgicd_writes, "vgicd: write") +PERFCOUNTER(vgicr_reads, "vgicr: read") +PERFCOUNTER(vgicr_writes, "vgicr: write") +PERFCOUNTER(vgic_cp64_reads, "vgic: cp64 read") +PERFCOUNTER(vgic_cp64_writes, "vgic: cp64 write") +PERFCOUNTER(vgic_sysreg_reads, "vgic: sysreg read") +PERFCOUNTER(vgic_sysreg_writes, "vgic: sysreg write") +PERFCOUNTER(vgic_sgi_list , "vgic: SGI send to list") +PERFCOUNTER(vgic_sgi_others, "vgic: SGI send to others") +PERFCOUNTER(vgic_sgi_self, "vgic: SGI send to self") +PERFCOUNTER(vgic_irq_migrates, "vgic: irq migration") + +PERFCOUNTER(vuart_reads, "vuart: read") +PERFCOUNTER(vuart_writes, "vuart: write") + +PERFCOUNTER(vtimer_cp32_reads, "vtimer: cp32 read") +PERFCOUNTER(vtimer_cp32_writes, "vtimer: cp32 write") + +PERFCOUNTER(vtimer_cp64_reads, "vtimer: cp64 read") +PERFCOUNTER(vtimer_cp64_writes, "vtimer: cp64 write") + +PERFCOUNTER(vtimer_sysreg_reads, "vtimer: sysreg read") +PERFCOUNTER(vtimer_sysreg_writes, "vtimer: sysreg write") + +PERFCOUNTER(vtimer_phys_inject, "vtimer: phys expired, injected") +PERFCOUNTER(vtimer_phys_masked, "vtimer: phys expired, masked") +PERFCOUNTER(vtimer_virt_inject, "vtimer: virt expired, injected") + +PERFCOUNTER(ppis, "#PPIs") +PERFCOUNTER(spis, "#SPIs") +PERFCOUNTER(guest_irqs, "#GUEST-IRQS") + +PERFCOUNTER(hyp_timer_irqs, "Hypervisor timer interrupts") +PERFCOUNTER(phys_timer_irqs, "Physical timer interrupts") +PERFCOUNTER(virt_timer_irqs, "Virtual timer interrupts") +PERFCOUNTER(maintenance_irqs, "Maintenance interrupts") + +PERFCOUNTER(atomics_guest, "atomics: guest 
access") +PERFCOUNTER(atomics_guest_paused, "atomics: guest paused") + +/*#endif*/ /* __XEN_PERFC_DEFN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/pl011-uart.h b/xen/arch/arm/include/asm/pl011-uart.h new file mode 100644 index 0000000000..57e9ec73ac --- /dev/null +++ b/xen/arch/arm/include/asm/pl011-uart.h @@ -0,0 +1,87 @@ +/* + * xen/include/asm-arm/pl011-uart.h + * + * Common constant definition between early printk and the UART driver + * for the pl011 UART + * + * Tim Deegan + * Copyright (c) 2011 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_PL011_H +#define __ASM_ARM_PL011_H + +/* PL011 register addresses */ +#define DR (0x00) +#define RSR (0x04) +#define FR (0x18) +#define ILPR (0x20) +#define IBRD (0x24) +#define FBRD (0x28) +#define LCR_H (0x2c) +#define CR (0x30) +#define IFLS (0x34) +#define IMSC (0x38) +#define RIS (0x3c) +#define MIS (0x40) +#define ICR (0x44) +#define DMACR (0x48) + +/* CR bits */ +#define CTSEN (1<<15) /* automatic CTS hardware flow control */ +#define RTSEN (1<<14) /* automatic RTS hardware flow control */ +#define RTS (1<<11) /* RTS signal */ +#define DTR (1<<10) /* DTR signal */ +#define RXE (1<<9) /* Receive enable */ +#define TXE (1<<8) /* Transmit enable */ +#define UARTEN (1<<0) /* UART enable */ + +/* FR bits */ +#define TXFE (1<<7) /* TX FIFO empty */ +#define RXFE (1<<4) /* RX FIFO empty */ +#define TXFF (1<<5) /* TX FIFO full */ +#define RXFF (1<<6) /* RX FIFO full */ +#define BUSY (1<<3) /* Transmit is not complete */ + +/* LCR_H bits */ +#define SPS (1<<7) /* Stick parity select */ +#define FEN (1<<4) /* FIFO enable */ +#define STP2 (1<<3) /* Two stop bits select */ +#define EPS (1<<2) /* Even parity select */ +#define PEN (1<<1) /* Parity enable */ +#define BRK (1<<0) /* Send break */ + +/* Interrupt bits (IMSC, MIS, ICR) */ +#define OEI (1<<10) /* Overrun Error interrupt mask */ +#define BEI (1<<9) /* Break Error interrupt mask */ +#define PEI (1<<8) /* Parity Error interrupt mask */ +#define FEI (1<<7) /* Framing Error interrupt mask */ +#define RTI (1<<6) /* Receive Timeout interrupt mask */ +#define TXI (1<<5) /* Transmit interrupt mask */ +#define RXI (1<<4) /* Receive interrupt mask */ +#define DSRMI (1<<3) /* nUARTDSR Modem interrupt mask */ +#define DCDMI (1<<2) /* nUARTDCD Modem interrupt mask */ +#define CTSMI (1<<1) /* nUARTCTS Modem interrupt mask */ +#define RIMI (1<<0) /* nUARTRI Modem interrupt mask */ +#define ALLI OEI|BEI|PEI|FEI|RTI|TXI|RXI|DSRMI|DCDMI|CTSMI|RIMI + +#endif /* __ASM_ARM_PL011_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platform.h b/xen/arch/arm/include/asm/platform.h new file mode 100644 index 0000000000..997eb25216 --- /dev/null +++ b/xen/arch/arm/include/asm/platform.h @@ -0,0 +1,82 @@ +#ifndef __ASM_ARM_PLATFORM_H +#define __ASM_ARM_PLATFORM_H + +#include 
+#include +#include + +/* Describes the specific operations for a board */ +struct platform_desc { + /* Platform name */ + const char *name; + /* Array of device tree 'compatible' strings */ + const char *const *compatible; + /* Platform initialization */ + int (*init)(void); + int (*init_time)(void); +#ifdef CONFIG_ARM_32 + /* SMP */ + int (*smp_init)(void); + int (*cpu_up)(int cpu); +#endif + /* Specific mapping for dom0 */ + int (*specific_mapping)(struct domain *d); + /* Platform reset */ + void (*reset)(void); + /* Platform power-off */ + void (*poweroff)(void); + /* Platform specific SMC handler */ + bool (*smc)(struct cpu_user_regs *regs); + /* + * Platform quirks + * Defined as a function because a platform may support multiple + * boards, each with different quirks + */ + uint32_t (*quirks)(void); + /* + * Platform blacklist devices + * List of devices which must not be passed through to a guest + */ + const struct dt_device_match *blacklist_dev; + /* Override the DMA width (32-bit by default). */ + unsigned int dma_bitsize; +}; + +/* + * Quirk for platforms where device tree incorrectly reports 4K GICC + * size, but actually the two GICC register ranges are placed at 64K + * stride. + */ +#define PLATFORM_QUIRK_GIC_64K_STRIDE (1 << 0) + +void platform_init(void); +int platform_init_time(void); +int platform_specific_mapping(struct domain *d); +#ifdef CONFIG_ARM_32 +int platform_smp_init(void); +int platform_cpu_up(int cpu); +#endif +void platform_reset(void); +void platform_poweroff(void); +bool platform_smc(struct cpu_user_regs *regs); +bool platform_has_quirk(uint32_t quirk); +bool platform_device_is_blacklisted(const struct dt_device_node *node); + +#define PLATFORM_START(_name, _namestr) \ +static const struct platform_desc __plat_desc_##_name __used \ +__section(".arch.info") = { \ + .name = _namestr, + +#define PLATFORM_END \ +}; + +#endif /* __ASM_ARM_PLATFORM_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platforms/exynos5.h b/xen/arch/arm/include/asm/platforms/exynos5.h new file mode 100644 index 0000000000..aef5c67084 --- /dev/null +++ b/xen/arch/arm/include/asm/platforms/exynos5.h @@ -0,0 +1,20 @@ +#ifndef __ASM_ARM_PLATFORMS_EXYNOS5_H +#define __ASM_ARM_PLATFORMS_EXYNOS5_H + +#define EXYNOS5_MCT_G_TCON 0x240 /* Relative to MCT_BASE */ +#define EXYNOS5_MCT_G_TCON_START (1 << 8) + +#define EXYNOS5_PA_CHIPID 0x10000000 +#define EXYNOS5_PA_TIMER 0x12dd0000 + +#define EXYNOS5_SWRESET 0x0400 /* Relative to PA_PMU */ + +#endif /* __ASM_ARM_PLATFORMS_EXYNOS5_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platforms/midway.h b/xen/arch/arm/include/asm/platforms/midway.h new file mode 100644 index 0000000000..099e4350f9 --- /dev/null +++ b/xen/arch/arm/include/asm/platforms/midway.h @@ -0,0 +1,21 @@ +#ifndef __ASM_ARM_PLATFORMS_MIDWAY_H +#define __ASM_ARM_PLATFORMS_MIDWAY_H + +/* addresses of SREG registers for resetting the SoC */ +#define MW_SREG_PWR_REQ 0xfff3cf00 +#define MW_SREG_A15_PWR_CTRL 0xfff3c200 + +#define MW_PWR_SUSPEND 0 +#define MW_PWR_SOFT_RESET 1 +#define MW_PWR_HARD_RESET 2 +#define MW_PWR_SHUTDOWN 3 + +#endif /* __ASM_ARM_PLATFORMS_MIDWAY_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platforms/omap5.h
b/xen/arch/arm/include/asm/platforms/omap5.h new file mode 100644 index 0000000000..c559c84b61 --- /dev/null +++ b/xen/arch/arm/include/asm/platforms/omap5.h @@ -0,0 +1,32 @@ +#ifndef __ASM_ARM_PLATFORMS_OMAP5_H +#define __ASM_ARM_PLATFORMS_OMAP5_H + +#define REALTIME_COUNTER_BASE 0x48243200 +#define INCREMENTER_NUMERATOR_OFFSET 0x10 +#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET 0x14 +#define NUMERATOR_DENUMERATOR_MASK 0xfffff000 +#define PRM_FRAC_INCREMENTER_DENUMERATOR_RELOAD 0x00010000 + +#define OMAP5_L4_WKUP 0x4AE00000 +#define OMAP5_PRM_BASE (OMAP5_L4_WKUP + 0x6000) +#define OMAP5_CKGEN_PRM_BASE (OMAP5_PRM_BASE + 0x100) +#define OMAP5_CM_CLKSEL_SYS 0x10 +#define SYS_CLKSEL_MASK 0xfffffff8 + +#define OMAP5_PRCM_MPU_BASE 0x48243000 +#define OMAP5_WKUPGEN_BASE 0x48281000 +#define OMAP5_SRAM_PA 0x40300000 + +#define OMAP_AUX_CORE_BOOT_0_OFFSET 0x800 +#define OMAP_AUX_CORE_BOOT_1_OFFSET 0x804 + +#endif /* __ASM_ARM_PLATFORMS_OMAP5_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platforms/vexpress.h b/xen/arch/arm/include/asm/platforms/vexpress.h new file mode 100644 index 0000000000..8b45d3a850 --- /dev/null +++ b/xen/arch/arm/include/asm/platforms/vexpress.h @@ -0,0 +1,37 @@ +#ifndef __ASM_ARM_PLATFORMS_VEXPRESS_H +#define __ASM_ARM_PLATFORMS_VEXPRESS_H + +/* V2M */ +#define V2M_SYS_MMIO_BASE (0x1c010000) +#define V2M_SYS_FLAGSSET (0x30) +#define V2M_SYS_FLAGSCLR (0x34) + +#define V2M_SYS_CFGDATA (0x00A0) +#define V2M_SYS_CFGCTRL (0x00A4) +#define V2M_SYS_CFGSTAT (0x00A8) + +#define V2M_SYS_CFG_START (1<<31) +#define V2M_SYS_CFG_WRITE (1<<30) +#define V2M_SYS_CFG_ERROR (1<<1) +#define V2M_SYS_CFG_COMPLETE (1<<0) + +#define V2M_SYS_CFG_OSC_FUNC 1 +#define V2M_SYS_CFG_OSC0 0 +#define V2M_SYS_CFG_OSC1 1 +#define V2M_SYS_CFG_OSC2 2 +#define V2M_SYS_CFG_OSC3 3 +#define V2M_SYS_CFG_OSC4 4 +#define V2M_SYS_CFG_OSC5 5 + +/* Board-specific: base address of system controller */ +#define SP810_ADDRESS 0x1C020000 + +#endif /* __ASM_ARM_PLATFORMS_VEXPRESS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/platforms/xilinx-zynqmp-eemi.h b/xen/arch/arm/include/asm/platforms/xilinx-zynqmp-eemi.h new file mode 100644 index 0000000000..cf25a9014d --- /dev/null +++ b/xen/arch/arm/include/asm/platforms/xilinx-zynqmp-eemi.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2018 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef __ASM_ARM_PLATFORMS_ZYNQMP_H +#define __ASM_ARM_PLATFORMS_ZYNQMP_H + +#include +#include + +#define EEMI_FID(fid) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_CONV_64, \ + ARM_SMCCC_OWNER_SIP, \ + fid) + +enum pm_api_id { + /* Miscellaneous API functions: */ + PM_GET_API_VERSION = 1, /* Do not change or move */ + PM_SET_CONFIGURATION, + PM_GET_NODE_STATUS, + PM_GET_OP_CHARACTERISTIC, + PM_REGISTER_NOTIFIER, + /* API for suspending of PUs: */ + PM_REQ_SUSPEND, + PM_SELF_SUSPEND, + PM_FORCE_POWERDOWN, + PM_ABORT_SUSPEND, + PM_REQ_WAKEUP, + PM_SET_WAKEUP_SOURCE, + PM_SYSTEM_SHUTDOWN, + /* API for managing PM slaves: */ + PM_REQ_NODE, + PM_RELEASE_NODE, + PM_SET_REQUIREMENT, + PM_SET_MAX_LATENCY, + /* Direct control API functions: */ + PM_RESET_ASSERT, + PM_RESET_GET_STATUS, + PM_MMIO_WRITE, + PM_MMIO_READ, + PM_INIT, + PM_FPGA_LOAD, + PM_FPGA_GET_STATUS, + PM_GET_CHIPID, + /* ID 25 is being used by U-Boot to process secure boot images */ + /* Secure library generic API functions */ + PM_SECURE_SHA = 26, + PM_SECURE_RSA, + /* Pin control API functions */ + PM_PINCTRL_REQUEST, + PM_PINCTRL_RELEASE, + PM_PINCTRL_GET_FUNCTION, + PM_PINCTRL_SET_FUNCTION, + PM_PINCTRL_CONFIG_PARAM_GET, + PM_PINCTRL_CONFIG_PARAM_SET, + /* PM IOCTL API */ + PM_IOCTL, + /* API to query information from firmware */ + PM_QUERY_DATA, + /* Clock control API functions */ + PM_CLOCK_ENABLE, + PM_CLOCK_DISABLE, + PM_CLOCK_GETSTATE, + PM_CLOCK_SETDIVIDER, + PM_CLOCK_GETDIVIDER, + PM_CLOCK_SETRATE, + PM_CLOCK_GETRATE, + PM_CLOCK_SETPARENT, + PM_CLOCK_GETPARENT, + PM_GET_TRUSTZONE_VERSION = 2563, + PM_API_MAX +}; + +/** + * @XST_PM_SUCCESS: Success + * @XST_PM_INTERNAL: Unexpected error + * @XST_PM_CONFLICT: Conflicting requirements + * @XST_PM_NO_ACCESS: Access rights violation + * @XST_PM_INVALID_NODE: Does not apply to node passed as argument + * @XST_PM_DOUBLE_REQ: Duplicate request + * @XST_PM_ABORT_SUSPEND: Target has aborted suspend + */ +enum pm_ret_status { + XST_PM_SUCCESS = 0, + XST_PM_INTERNAL = 2000, + XST_PM_CONFLICT, + XST_PM_NO_ACCESS, + XST_PM_INVALID_NODE, + XST_PM_DOUBLE_REQ, + XST_PM_ABORT_SUSPEND, +}; + +/* IPI SMC function numbers enum definition and fids */ +#define IPI_MAILBOX_FID(fid) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_SIP, \ + fid) +enum ipi_api_id { + IPI_MAILBOX_OPEN = 0x1000, + IPI_MAILBOX_RELEASE, + IPI_MAILBOX_STATUS_ENQUIRY, + IPI_MAILBOX_NOTIFY, + IPI_MAILBOX_ACK, + IPI_MAILBOX_ENABLE_IRQ, + IPI_MAILBOX_DISABLE_IRQ, +}; + +extern bool zynqmp_eemi(struct cpu_user_regs *regs); + +#endif /* __ASM_ARM_PLATFORMS_ZYNQMP_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/processor.h b/xen/arch/arm/include/asm/processor.h new file mode 100644 index 0000000000..8ab2940f68 --- /dev/null +++ b/xen/arch/arm/include/asm/processor.h @@ -0,0 +1,598 @@ +#ifndef __ASM_ARM_PROCESSOR_H +#define __ASM_ARM_PROCESSOR_H + +#ifndef __ASSEMBLY__ +#include +#endif +#include + +/* CTR Cache Type Register */ +#define CTR_L1IP_MASK 0x3 +#define CTR_L1IP_SHIFT 14 +#define CTR_DMINLINE_SHIFT 16 +#define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf +#define CTR_ERG_SHIFT 20 +#define CTR_CWG_SHIFT 24 +#define CTR_CWG_MASK 15 +#define CTR_IDC_SHIFT 28 +#define CTR_DIC_SHIFT 29 + +#define ICACHE_POLICY_VPIPT 0 +#define ICACHE_POLICY_AIVIVT 1 +#define ICACHE_POLICY_VIPT 2 +#define ICACHE_POLICY_PIPT 3 + +/* MIDR Main ID Register */
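+/* + * Worked example (editor's addition): a Cortex-A72 r0p3 reads back + * MIDR == 0x410fd083, i.e. IMPLEMENTOR 0x41 (Arm Ltd), VARIANT 0x0, + * ARCHITECTURE 0xf, PARTNUM 0xd08 and REVISION 0x3, which matches + * MIDR_CORTEX_A72 below under MIDR_CPU_MODEL_MASK. + */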
+#define MIDR_REVISION_MASK 0xf +#define MIDR_RESIVION(midr) ((midr) & MIDR_REVISION_MASK) +#define MIDR_PARTNUM_SHIFT 4 +#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT) +#define MIDR_PARTNUM(midr) \ + (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT) +#define MIDR_ARCHITECTURE_SHIFT 16 +#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT) +#define MIDR_ARCHITECTURE(midr) \ + (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT) +#define MIDR_VARIANT_SHIFT 20 +#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT) +#define MIDR_VARIANT(midr) \ + (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) +#define MIDR_IMPLEMENTOR_SHIFT 24 +#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT) +#define MIDR_IMPLEMENTOR(midr) \ + (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) + +#define MIDR_CPU_MODEL(imp, partnum) \ + (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ + (0xf << MIDR_ARCHITECTURE_SHIFT) | \ + ((partnum) << MIDR_PARTNUM_SHIFT)) + +#define MIDR_CPU_MODEL_MASK \ + (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | MIDR_ARCHITECTURE_MASK) + +#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ +({ \ + u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ + u32 _rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ + \ + _model == (model) && _rv >= (rv_min) && _rv <= (rv_max); \ +}) + +#define ARM_CPU_IMP_ARM 0x41 + +#define ARM_CPU_PART_CORTEX_A12 0xC0D +#define ARM_CPU_PART_CORTEX_A17 0xC0E +#define ARM_CPU_PART_CORTEX_A15 0xC0F +#define ARM_CPU_PART_CORTEX_A53 0xD03 +#define ARM_CPU_PART_CORTEX_A55 0xD05 +#define ARM_CPU_PART_CORTEX_A57 0xD07 +#define ARM_CPU_PART_CORTEX_A72 0xD08 +#define ARM_CPU_PART_CORTEX_A73 0xD09 +#define ARM_CPU_PART_CORTEX_A75 0xD0A +#define ARM_CPU_PART_CORTEX_A76 0xD0B +#define ARM_CPU_PART_NEOVERSE_N1 0xD0C + +#define MIDR_CORTEX_A12 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A12) +#define MIDR_CORTEX_A17 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A17) +#define MIDR_CORTEX_A15 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A15) +#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) +#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) +#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) +#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) +#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) + +/* MPIDR Multiprocessor Affinity Register */ +#define _MPIDR_UP (30) +#define MPIDR_UP (_AC(1,UL) << _MPIDR_UP) +#define _MPIDR_SMP (31) +#define MPIDR_SMP (_AC(1,UL) << _MPIDR_SMP) +#define MPIDR_AFF0_SHIFT (0) +#define MPIDR_AFF0_MASK (_AC(0xff,UL) << MPIDR_AFF0_SHIFT) +#ifdef CONFIG_ARM_64 +#define MPIDR_HWID_MASK _AC(0xff00ffffff,UL) +#else +#define MPIDR_HWID_MASK _AC(0xffffff,U) +#endif +#define MPIDR_INVALID (~MPIDR_HWID_MASK) +#define MPIDR_LEVEL_BITS (8) + + +/* + * Macros to extract affinity level. 
picked from kernel + */ + +#define MPIDR_LEVEL_BITS_SHIFT 3 +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_LEVEL_SHIFT(level) \ + (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) + +#define AFFINITY_MASK(level) ~((_AC(0x1,UL) << MPIDR_LEVEL_SHIFT(level)) - 1) + +/* TTBCR Translation Table Base Control Register */ +#define TTBCR_EAE _AC(0x80000000,U) +#define TTBCR_N_MASK _AC(0x07,U) +#define TTBCR_N_16KB _AC(0x00,U) +#define TTBCR_N_8KB _AC(0x01,U) +#define TTBCR_N_4KB _AC(0x02,U) +#define TTBCR_N_2KB _AC(0x03,U) +#define TTBCR_N_1KB _AC(0x04,U) + +/* + * TTBCR_PD(0|1) can be applied only if LPAE is disabled, i.e., TTBCR.EAE==0 + * (ARM DDI 0487B.a G6-5203 and ARM DDI 0406C.b B4-1722). + */ +#define TTBCR_PD0 (_AC(1,U)<<4) +#define TTBCR_PD1 (_AC(1,U)<<5) + +/* SCTLR System Control Register. */ + +/* Bits specific to SCTLR_EL1 for Arm32 */ + +#define SCTLR_A32_EL1_V BIT(13, UL) + +/* Common bits for SCTLR_ELx for Arm32 */ + +#define SCTLR_A32_ELx_TE BIT(30, UL) +#define SCTLR_A32_ELx_FI BIT(21, UL) + +/* Common bits for SCTLR_ELx for Arm64 */ +#define SCTLR_A64_ELx_SA BIT(3, UL) + +/* Common bits for SCTLR_ELx on all architectures */ +#define SCTLR_Axx_ELx_EE BIT(25, UL) +#define SCTLR_Axx_ELx_WXN BIT(19, UL) +#define SCTLR_Axx_ELx_I BIT(12, UL) +#define SCTLR_Axx_ELx_C BIT(2, UL) +#define SCTLR_Axx_ELx_A BIT(1, UL) +#define SCTLR_Axx_ELx_M BIT(0, UL) + +#ifdef CONFIG_ARM_32 + +#define HSCTLR_RES1 (BIT( 3, UL) | BIT( 4, UL) | BIT( 5, UL) |\ + BIT( 6, UL) | BIT(11, UL) | BIT(16, UL) |\ + BIT(18, UL) | BIT(22, UL) | BIT(23, UL) |\ + BIT(28, UL) | BIT(29, UL)) + +#define HSCTLR_RES0 (BIT(7, UL) | BIT(8, UL) | BIT(9, UL) | BIT(10, UL) |\ + BIT(13, UL) | BIT(14, UL) | BIT(15, UL) | BIT(17, UL) |\ + BIT(20, UL) | BIT(24, UL) | BIT(26, UL) | BIT(27, UL) |\ + BIT(31, UL)) + +/* Initial value for HSCTLR */ +#define HSCTLR_SET (HSCTLR_RES1 | SCTLR_Axx_ELx_A | SCTLR_Axx_ELx_I) + +/* Only used a pre-processing time... */ +#define HSCTLR_CLEAR (HSCTLR_RES0 | SCTLR_Axx_ELx_M |\ + SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_WXN |\ + SCTLR_A32_ELx_FI | SCTLR_Axx_ELx_EE |\ + SCTLR_A32_ELx_TE) + +#if (HSCTLR_SET ^ HSCTLR_CLEAR) != 0xffffffffU +#error "Inconsistent HSCTLR set/clear bits" +#endif + +#else + +#define SCTLR_EL2_RES1 (BIT( 4, UL) | BIT( 5, UL) | BIT(11, UL) |\ + BIT(16, UL) | BIT(18, UL) | BIT(22, UL) |\ + BIT(23, UL) | BIT(28, UL) | BIT(29, UL)) + +#define SCTLR_EL2_RES0 (BIT( 6, UL) | BIT( 7, UL) | BIT( 8, UL) |\ + BIT( 9, UL) | BIT(10, UL) | BIT(13, UL) |\ + BIT(14, UL) | BIT(15, UL) | BIT(17, UL) |\ + BIT(20, UL) | BIT(21, UL) | BIT(24, UL) |\ + BIT(26, UL) | BIT(27, UL) | BIT(30, UL) |\ + BIT(31, UL) | (0xffffffffULL << 32)) + +/* Initial value for SCTLR_EL2 */ +#define SCTLR_EL2_SET (SCTLR_EL2_RES1 | SCTLR_A64_ELx_SA |\ + SCTLR_Axx_ELx_I) + +/* Only used a pre-processing time... 
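+ * (editor's note: together with SCTLR_EL2_SET this must cover each of the + * 64 bits exactly once; the #if below enforces that at build time by + * checking that SET ^ CLEAR is all-ones).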
*/ +#define SCTLR_EL2_CLEAR (SCTLR_EL2_RES0 | SCTLR_Axx_ELx_M |\ + SCTLR_Axx_ELx_A | SCTLR_Axx_ELx_C |\ + SCTLR_Axx_ELx_WXN | SCTLR_Axx_ELx_EE) + +#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL +#error "Inconsistent SCTLR_EL2 set/clear bits" +#endif + +#endif + +/* HCR Hyp Configuration Register */ +#define HCR_RW (_AC(1,UL)<<31) /* Register Width, ARM64 only */ +#define HCR_TGE (_AC(1,UL)<<27) /* Trap General Exceptions */ +#define HCR_TVM (_AC(1,UL)<<26) /* Trap Virtual Memory Controls */ +#define HCR_TTLB (_AC(1,UL)<<25) /* Trap TLB Maintenance Operations */ +#define HCR_TPU (_AC(1,UL)<<24) /* Trap Cache Maintenance Operations to PoU */ +#define HCR_TPC (_AC(1,UL)<<23) /* Trap Cache Maintenance Operations to PoC */ +#define HCR_TSW (_AC(1,UL)<<22) /* Trap Set/Way Cache Maintenance Operations */ +#define HCR_TAC (_AC(1,UL)<<21) /* Trap ACTLR Accesses */ +#define HCR_TIDCP (_AC(1,UL)<<20) /* Trap lockdown */ +#define HCR_TSC (_AC(1,UL)<<19) /* Trap SMC instruction */ +#define HCR_TID3 (_AC(1,UL)<<18) /* Trap ID Register Group 3 */ +#define HCR_TID2 (_AC(1,UL)<<17) /* Trap ID Register Group 2 */ +#define HCR_TID1 (_AC(1,UL)<<16) /* Trap ID Register Group 1 */ +#define HCR_TID0 (_AC(1,UL)<<15) /* Trap ID Register Group 0 */ +#define HCR_TWE (_AC(1,UL)<<14) /* Trap WFE instruction */ +#define HCR_TWI (_AC(1,UL)<<13) /* Trap WFI instruction */ +#define HCR_DC (_AC(1,UL)<<12) /* Default cacheable */ +#define HCR_BSU_MASK (_AC(3,UL)<<10) /* Barrier Shareability Upgrade */ +#define HCR_BSU_NONE (_AC(0,UL)<<10) +#define HCR_BSU_INNER (_AC(1,UL)<<10) +#define HCR_BSU_OUTER (_AC(2,UL)<<10) +#define HCR_BSU_FULL (_AC(3,UL)<<10) +#define HCR_FB (_AC(1,UL)<<9) /* Force Broadcast of Cache/BP/TLB operations */ +#define HCR_VA (_AC(1,UL)<<8) /* Virtual Asynchronous Abort */ +#define HCR_VI (_AC(1,UL)<<7) /* Virtual IRQ */ +#define HCR_VF (_AC(1,UL)<<6) /* Virtual FIQ */ +#define HCR_AMO (_AC(1,UL)<<5) /* Override CPSR.A */ +#define HCR_IMO (_AC(1,UL)<<4) /* Override CPSR.I */ +#define HCR_FMO (_AC(1,UL)<<3) /* Override CPSR.F */ +#define HCR_PTW (_AC(1,UL)<<2) /* Protected Walk */ +#define HCR_SWIO (_AC(1,UL)<<1) /* Set/Way Invalidation Override */ +#define HCR_VM (_AC(1,UL)<<0) /* Virtual MMU Enable */ + +/* TCR: Stage 1 Translation Control */ + +#define TCR_T0SZ_SHIFT (0) +#define TCR_T1SZ_SHIFT (16) +#define TCR_T0SZ(x) ((x)< */ + +/* HDCR Hyp. 
Debug Configuration Register */ +#define HDCR_TDRA (_AC(1,U)<<11) /* Trap Debug ROM access */ +#define HDCR_TDOSA (_AC(1,U)<<10) /* Trap Debug-OS-related register access */ +#define HDCR_TDA (_AC(1,U)<<9) /* Trap Debug Access */ +#define HDCR_TDE (_AC(1,U)<<8) /* Route Soft Debug exceptions from EL1/EL0 to EL2 */ +#define HDCR_TPM (_AC(1,U)<<6) /* Trap Performance Monitors accesses */ +#define HDCR_TPMCR (_AC(1,U)<<5) /* Trap PMCR accesses */ + +#define HSR_EC_SHIFT 26 + +#define HSR_EC_UNKNOWN 0x00 +#define HSR_EC_WFI_WFE 0x01 +#define HSR_EC_CP15_32 0x03 +#define HSR_EC_CP15_64 0x04 +#define HSR_EC_CP14_32 0x05 /* Trapped MCR or MRC access to CP14 */ +#define HSR_EC_CP14_DBG 0x06 /* Trapped LDC/STC access to CP14 (only for debug registers) */ +#define HSR_EC_CP 0x07 /* HCPTR-trapped access to CP0-CP13 */ +#define HSR_EC_CP10 0x08 +#define HSR_EC_JAZELLE 0x09 +#define HSR_EC_BXJ 0x0a +#define HSR_EC_CP14_64 0x0c +#define HSR_EC_SVC32 0x11 +#define HSR_EC_HVC32 0x12 +#define HSR_EC_SMC32 0x13 +#ifdef CONFIG_ARM_64 +#define HSR_EC_SVC64 0x15 +#define HSR_EC_HVC64 0x16 +#define HSR_EC_SMC64 0x17 +#define HSR_EC_SYSREG 0x18 +#endif +#define HSR_EC_INSTR_ABORT_LOWER_EL 0x20 +#define HSR_EC_INSTR_ABORT_CURR_EL 0x21 +#define HSR_EC_DATA_ABORT_LOWER_EL 0x24 +#define HSR_EC_DATA_ABORT_CURR_EL 0x25 +#ifdef CONFIG_ARM_64 +#define HSR_EC_BRK 0x3c +#endif + +/* FSR format, common */ +#define FSR_LPAE (_AC(1,UL)<<9) +/* FSR short format */ +#define FSRS_FS_DEBUG (_AC(0,UL)<<10|_AC(0x2,UL)<<0) +/* FSR long format */ +#define FSRL_STATUS_DEBUG (_AC(0x22,UL)<<0) + +#ifdef CONFIG_ARM_64 +#define MM64_VMID_8_BITS_SUPPORT 0x0 +#define MM64_VMID_16_BITS_SUPPORT 0x2 +#endif + +#ifndef __ASSEMBLY__ + +extern register_t __cpu_logical_map[]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +#endif + +/* Physical Address Register */ +#define PAR_F (_AC(1,U)<<0) + +/* .... If F == 1 */ +#define PAR_FSC_SHIFT (1) +#define PAR_FSC_MASK (_AC(0x3f,U)<<PAR_FSC_SHIFT) + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +#ifndef __ASSEMBLY__ +void panic_PAR(uint64_t par); + +void show_execution_state(const struct cpu_user_regs *regs); +void show_registers(const struct cpu_user_regs *regs); +//#define dump_execution_state() run_in_exception_handler(show_execution_state) +#define dump_execution_state() WARN() + +#define cpu_relax() barrier() /* Could yield? */ + +/* All a bit UP for the moment */ +#define cpu_to_core(_cpu) (0) +#define cpu_to_socket(_cpu) (0) + +struct vcpu; +void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, + struct vcpu_guest_core_regs *regs); +void vcpu_regs_user_to_hyp(struct vcpu *vcpu, + const struct vcpu_guest_core_regs *regs); + +void do_trap_hyp_serror(struct cpu_user_regs *regs); + +void do_trap_guest_serror(struct cpu_user_regs *regs); + +register_t get_default_hcr_flags(void); + +/* + * Synchronize SError unless the feature is selected. + * This relies on SErrors being currently unmasked. + */ +#define SYNCHRONIZE_SERROR(feat) \ + do { \ + ASSERT(local_abort_is_enabled()); \ + asm volatile(ALTERNATIVE("dsb sy; isb", \ + "nop; nop", feat) \ + : : : "memory"); \ + } while (0) + +/* + * Clear/Set flags in HCR_EL2 for a given vCPU. It only supports the current + * vCPU for now.
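+ * + * Example (editor's sketch): temporarily trapping guest writes to the + * virtual memory control registers could look like + * + *     vcpu_hcr_set_flags(current, HCR_TVM); + *     ... handle the resulting traps ... + *     vcpu_hcr_clear_flags(current, HCR_TVM); + * + * Both macros write HCR_EL2 back immediately, so the change takes effect + * without waiting for a context switch.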
*/ +#define vcpu_hcr_clear_flags(v, flags) \ + do { \ + ASSERT((v) == current); \ + (v)->arch.hcr_el2 &= ~(flags); \ + WRITE_SYSREG((v)->arch.hcr_el2, HCR_EL2); \ + } while (0) + +#define vcpu_hcr_set_flags(v, flags) \ + do { \ + ASSERT((v) == current); \ + (v)->arch.hcr_el2 |= (flags); \ + WRITE_SYSREG((v)->arch.hcr_el2, HCR_EL2); \ + } while (0) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_ARM_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/procinfo.h b/xen/arch/arm/include/asm/procinfo.h new file mode 100644 index 0000000000..02be56e348 --- /dev/null +++ b/xen/arch/arm/include/asm/procinfo.h @@ -0,0 +1,43 @@ +/* + * include/asm-arm/procinfo.h + * + * Bamvor Jian Zhang + * Copyright (c) 2013 SUSE + * + * based on linux/arch/arm/include/asm/procinfo.h + * Copyright (C) 1996-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_PROCINFO_H +#define __ASM_ARM_PROCINFO_H + +#include + +struct processor { + /* Initialise specific processor registers for the new vCPU */ + void (*vcpu_initialise)(struct vcpu *v); +}; + +struct proc_info_list { + unsigned int cpu_val; + unsigned int cpu_mask; + void (*cpu_init)(void); + struct processor *processor; +}; + +const struct proc_info_list *lookup_processor_type(void); + +void processor_setup(void); +void processor_vcpu_initialise(struct vcpu *v); + +#endif diff --git a/xen/arch/arm/include/asm/psci.h b/xen/arch/arm/include/asm/psci.h new file mode 100644 index 0000000000..832f77afff --- /dev/null +++ b/xen/arch/arm/include/asm/psci.h @@ -0,0 +1,91 @@ +#ifndef __ASM_PSCI_H__ +#define __ASM_PSCI_H__ + +#include + +/* PSCI return values (inclusive of all PSCI versions) */ +#define PSCI_SUCCESS 0 +#define PSCI_NOT_SUPPORTED -1 +#define PSCI_INVALID_PARAMETERS -2 +#define PSCI_DENIED -3 +#define PSCI_ALREADY_ON -4 +#define PSCI_ON_PENDING -5 +#define PSCI_INTERNAL_FAILURE -6 +#define PSCI_NOT_PRESENT -7 +#define PSCI_DISABLED -8 +#define PSCI_INVALID_ADDRESS -9 + +/* availability of PSCI on the host for SMP bringup */ +extern uint32_t psci_ver; + +int psci_init(void); +int call_psci_cpu_on(int cpu); +void call_psci_cpu_off(void); +void call_psci_system_off(void); +void call_psci_system_reset(void); + +/* PSCI v0.2 interface */ +#define PSCI_0_2_FN32(nr) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + nr) +#define PSCI_0_2_FN64(nr) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_CONV_64, \ + ARM_SMCCC_OWNER_STANDARD, \ + nr) + +#define PSCI_0_2_FN32_PSCI_VERSION PSCI_0_2_FN32(0) +#define PSCI_0_2_FN32_CPU_SUSPEND PSCI_0_2_FN32(1) +#define PSCI_0_2_FN32_CPU_OFF PSCI_0_2_FN32(2) +#define PSCI_0_2_FN32_CPU_ON PSCI_0_2_FN32(3) +#define PSCI_0_2_FN32_AFFINITY_INFO PSCI_0_2_FN32(4) +#define PSCI_0_2_FN32_MIGRATE_INFO_TYPE PSCI_0_2_FN32(6) +#define PSCI_0_2_FN32_SYSTEM_OFF PSCI_0_2_FN32(8) +#define PSCI_0_2_FN32_SYSTEM_RESET PSCI_0_2_FN32(9) +#define PSCI_1_0_FN32_PSCI_FEATURES
PSCI_0_2_FN32(10) + +#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1) +#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3) +#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4) + +/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */ +#define PSCI_0_2_AFFINITY_LEVEL_ON 0 +#define PSCI_0_2_AFFINITY_LEVEL_OFF 1 +#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2 + +/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */ +#define PSCI_0_2_TOS_UP_MIGRATE_CAPABLE 0 +#define PSCI_0_2_TOS_UP_NOT_MIGRATE_CAPABLE 1 +#define PSCI_0_2_TOS_MP_OR_NOT_PRESENT 2 + +/* PSCI v0.2 power state encoding for CPU_SUSPEND function */ +#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff +#define PSCI_0_2_POWER_STATE_ID_SHIFT 0 +#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_0_2_POWER_STATE_TYPE_MASK \ + (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT) + +/* PSCI version decoding (independent of PSCI version) */ +#define PSCI_VERSION_MAJOR_SHIFT 16 +#define PSCI_VERSION_MINOR_MASK \ + ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1) +#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK +#define PSCI_VERSION_MAJOR(ver) \ + (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) +#define PSCI_VERSION_MINOR(ver) \ + ((ver) & PSCI_VERSION_MINOR_MASK) + +#define PSCI_VERSION(major, minor) \ + (((major) << PSCI_VERSION_MAJOR_SHIFT) | (minor)) + +#endif /* __ASM_PSCI_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/random.h b/xen/arch/arm/include/asm/random.h new file mode 100644 index 0000000000..b4acee276b --- /dev/null +++ b/xen/arch/arm/include/asm/random.h @@ -0,0 +1,9 @@ +#ifndef __ASM_RANDOM_H__ +#define __ASM_RANDOM_H__ + +static inline unsigned int arch_get_random(void) +{ + return 0; +} + +#endif /* __ASM_RANDOM_H__ */ diff --git a/xen/arch/arm/include/asm/regs.h b/xen/arch/arm/include/asm/regs.h new file mode 100644 index 0000000000..ec091a28a2 --- /dev/null +++ b/xen/arch/arm/include/asm/regs.h @@ -0,0 +1,73 @@ +#ifndef __ARM_REGS_H__ +#define __ARM_REGS_H__ + +#define PSR_MODE_MASK 0x1f + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#define psr_mode(psr,m) (((psr) & PSR_MODE_MASK) == m) + +static inline bool psr_mode_is_32bit(const struct cpu_user_regs *regs) +{ +#ifdef CONFIG_ARM_32 + return true; +#else + return !!(regs->cpsr & PSR_MODE_BIT); +#endif +} + +#define usr_mode(r) psr_mode((r)->cpsr,PSR_MODE_USR) +#define fiq_mode(r) psr_mode((r)->cpsr,PSR_MODE_FIQ) +#define irq_mode(r) psr_mode((r)->cpsr,PSR_MODE_IRQ) +#define svc_mode(r) psr_mode((r)->cpsr,PSR_MODE_SVC) +#define mon_mode(r) psr_mode((r)->cpsr,PSR_MODE_MON) +#define abt_mode(r) psr_mode((r)->cpsr,PSR_MODE_ABT) +#define und_mode(r) psr_mode((r)->cpsr,PSR_MODE_UND) +#define sys_mode(r) psr_mode((r)->cpsr,PSR_MODE_SYS) + +#ifdef CONFIG_ARM_32 +#define hyp_mode(r) psr_mode((r)->cpsr,PSR_MODE_HYP) +#define psr_mode_is_user(r) usr_mode(r) +#else +#define hyp_mode(r) (psr_mode((r)->cpsr,PSR_MODE_EL2h) || \ + psr_mode((r)->cpsr,PSR_MODE_EL2t)) + +/* + * Trap may have been taken from EL0, which might be in AArch32 usr + * mode, or in AArch64 mode (PSR_MODE_EL0t). + */ +#define psr_mode_is_user(r) \ + (psr_mode((r)->cpsr,PSR_MODE_EL0t) || usr_mode(r)) +#endif + +static inline bool guest_mode(const struct cpu_user_regs *r) +{ + unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); + /* Frame pointer must point into current CPU stack. 
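+ * + * (Editor's note: guest registers are saved at the location returned by + * guest_cpu_user_regs(), so diff == 0 identifies a guest frame; any other + * offset within the stack must belong to a nested hypervisor frame, which + * is what the ASSERTs below check.)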
*/ + ASSERT(diff < STACK_SIZE); + /* If not a guest frame, it must be a hypervisor frame. */ + ASSERT((diff == 0) || hyp_mode(r)); + /* Return TRUE if it's a guest frame. */ + return (diff == 0); +} + +register_t get_user_reg(struct cpu_user_regs *regs, int reg); +void set_user_reg(struct cpu_user_regs *regs, int reg, register_t val); + +#endif + +#endif /* __ARM_REGS_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/scif-uart.h b/xen/arch/arm/include/asm/scif-uart.h new file mode 100644 index 0000000000..bce3404898 --- /dev/null +++ b/xen/arch/arm/include/asm/scif-uart.h @@ -0,0 +1,127 @@ +/* + * xen/include/asm-arm/scif-uart.h + * + * Common constant definition between early printk and the UART driver + * for the SCIF(A) compatible UART. + * + * Oleksandr Tyshchenko + * Copyright (C) 2014, Globallogic. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_SCIF_UART_H +#define __ASM_ARM_SCIF_UART_H + +/* Register offsets (SCIF) */ +#define SCIF_SCSMR (0x00) /* Serial mode register */ +#define SCIF_SCBRR (0x04) /* Bit rate register */ +#define SCIF_SCSCR (0x08) /* Serial control register */ +#define SCIF_SCFTDR (0x0C) /* Transmit FIFO data register */ +#define SCIF_SCFSR (0x10) /* Serial status register */ +#define SCIF_SCFRDR (0x14) /* Receive FIFO data register */ +#define SCIF_SCFCR (0x18) /* FIFO control register */ +#define SCIF_SCFDR (0x1C) /* FIFO data count register */ +#define SCIF_SCSPTR (0x20) /* Serial port register */ +#define SCIF_SCLSR (0x24) /* Line status register */ +#define SCIF_DL (0x30) /* Frequency division register */ +#define SCIF_CKS (0x34) /* Clock Select register */ + +/* Serial Control Register (SCSCR) */ +#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */ +#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */ +#define SCSCR_TE (1 << 5) /* Transmit Enable */ +#define SCSCR_RE (1 << 4) /* Receive Enable */ +#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable */ +#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable */ +#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */ +#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */ + +/* Serial Status Register (SCFSR) */ +#define SCFSR_ER (1 << 7) /* Receive Error */ +#define SCFSR_TEND (1 << 6) /* Transmission End */ +#define SCFSR_TDFE (1 << 5) /* Transmit FIFO Data Empty */ +#define SCFSR_BRK (1 << 4) /* Break Detect */ +#define SCFSR_FER (1 << 3) /* Framing Error */ +#define SCFSR_PER (1 << 2) /* Parity Error */ +#define SCFSR_RDF (1 << 1) /* Receive FIFO Data Full */ +#define SCFSR_DR (1 << 0) /* Receive Data Ready */ + +/* Line Status Register (SCLSR) */ +#define SCLSR_TO (1 << 2) /* Timeout */ +#define SCLSR_ORER (1 << 0) /* Overrun Error */ + +/* FIFO Control Register (SCFCR) */ +#define SCFCR_RTRG1 (1 << 7) /* Receive FIFO Data Count Trigger 1 */ +#define SCFCR_RTRG0 (1 << 6) /* Receive FIFO Data Count Trigger 0 */ +#define SCFCR_TTRG1 (1 << 5) /* Transmit FIFO Data Count Trigger 1 */ +#define SCFCR_TTRG0 
(1 << 4) /* Transmit FIFO Data Count Trigger 0 */ +#define SCFCR_MCE (1 << 3) /* Modem Control Enable */ +#define SCFCR_TFRST (1 << 2) /* Transmit FIFO Data Register Reset */ +#define SCFCR_RFRST (1 << 1) /* Receive FIFO Data Register Reset */ +#define SCFCR_LOOP (1 << 0) /* Loopback Test */ + +#define SCFCR_RTRG00 (0) +#define SCFCR_RTRG01 (SCFCR_RTRG0) +#define SCFCR_RTRG10 (SCFCR_RTRG1) +#define SCFCR_RTRG11 (SCFCR_RTRG1 | SCFCR_RTRG0) + +#define SCFCR_TTRG00 (0) +#define SCFCR_TTRG01 (SCFCR_TTRG0) +#define SCFCR_TTRG10 (SCFCR_TTRG1) +#define SCFCR_TTRG11 (SCFCR_TTRG1 | SCFCR_TTRG0) + +/* Register offsets (SCIFA) */ +#define SCIFA_SCASMR (0x00) /* Serial mode register */ +#define SCIFA_SCABRR (0x04) /* Bit rate register */ +#define SCIFA_SCASCR (0x08) /* Serial control register */ +#define SCIFA_SCATDSR (0x0C) /* Transmit data stop register */ +#define SCIFA_SCAFER (0x10) /* FIFO error count register */ +#define SCIFA_SCASSR (0x14) /* Serial status register */ +#define SCIFA_SCAFCR (0x18) /* FIFO control register */ +#define SCIFA_SCAFDR (0x1C) /* FIFO data count register */ +#define SCIFA_SCAFTDR (0x20) /* Transmit FIFO data register */ +#define SCIFA_SCAFRDR (0x24) /* Receive FIFO data register */ +#define SCIFA_SCAPCR (0x30) /* Serial port control register */ +#define SCIFA_SCAPDR (0x34) /* Serial port data register */ + +/* Serial Control Register (SCASCR) */ +#define SCASCR_ERIE (1 << 10) /* Receive Error Interrupt Enable */ +#define SCASCR_BRIE (1 << 9) /* Break Interrupt Enable */ +#define SCASCR_DRIE (1 << 8) /* Receive Data Ready Interrupt Enable */ +#define SCASCR_TIE (1 << 7) /* Transmit Interrupt Enable */ +#define SCASCR_RIE (1 << 6) /* Receive Interrupt Enable */ +#define SCASCR_TE (1 << 5) /* Transmit Enable */ +#define SCASCR_RE (1 << 4) /* Receive Enable */ +#define SCASCR_CKE0 (1 << 0) /* Clock Enable 0 */ + +/* Serial Status Register (SCASSR) */ +#define SCASSR_ORER (1 << 9) /* Overrun Error */ +#define SCASSR_TSF (1 << 8) /* Transmit Data Stop */ +#define SCASSR_ER (1 << 7) /* Receive Error */ +#define SCASSR_TEND (1 << 6) /* Transmission End */ +#define SCASSR_TDFE (1 << 5) /* Transmit FIFO Data Empty */ +#define SCASSR_BRK (1 << 4) /* Break Detect */ +#define SCASSR_FER (1 << 3) /* Framing Error */ +#define SCASSR_PER (1 << 2) /* Parity Error */ +#define SCASSR_RDF (1 << 1) /* Receive FIFO Data Full */ +#define SCASSR_DR (1 << 0) /* Receive Data Ready */ + +#endif /* __ASM_ARM_SCIF_UART_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/setup.h b/xen/arch/arm/include/asm/setup.h new file mode 100644 index 0000000000..95da0b7ab9 --- /dev/null +++ b/xen/arch/arm/include/asm/setup.h @@ -0,0 +1,135 @@ +#ifndef __ARM_SETUP_H_ +#define __ARM_SETUP_H_ + +#include + +#define MIN_FDT_ALIGN 8 +#define MAX_FDT_SIZE SZ_2M + +#define NR_MEM_BANKS 128 + +#define MAX_MODULES 32 /* Current maximum useful modules */ + +typedef enum { + BOOTMOD_XEN, + BOOTMOD_FDT, + BOOTMOD_KERNEL, + BOOTMOD_RAMDISK, + BOOTMOD_XSM, + BOOTMOD_GUEST_DTB, + BOOTMOD_UNKNOWN +} bootmodule_kind; + + +struct membank { + paddr_t start; + paddr_t size; + bool xen_domain; /* whether the memory bank is bound to a Xen domain. */ +}; + +struct meminfo { + int nr_banks; + struct membank bank[NR_MEM_BANKS]; +}; + +/* + * The domU flag is set for kernels and ramdisks of "xen,domain" nodes. 
+ * The purpose of the domU flag is to avoid getting confused in + * kernel_probe, where we try to guess which is the dom0 kernel and + * initrd to be compatible with all versions of the multiboot spec. + */ +#define BOOTMOD_MAX_CMDLINE 1024 +struct bootmodule { + bootmodule_kind kind; + bool domU; + paddr_t start; + paddr_t size; +}; + +/* DT_MAX_NAME is the node name max length according the DT spec */ +#define DT_MAX_NAME 41 +struct bootcmdline { + bootmodule_kind kind; + bool domU; + paddr_t start; + char dt_name[DT_MAX_NAME]; + char cmdline[BOOTMOD_MAX_CMDLINE]; +}; + +struct bootmodules { + int nr_mods; + struct bootmodule module[MAX_MODULES]; +}; + +struct bootcmdlines { + unsigned int nr_mods; + struct bootcmdline cmdline[MAX_MODULES]; +}; + +struct bootinfo { + struct meminfo mem; + /* The reserved regions are only used when booting using Device-Tree */ + struct meminfo reserved_mem; + struct bootmodules modules; + struct bootcmdlines cmdlines; +#ifdef CONFIG_ACPI + struct meminfo acpi; +#endif +}; + +extern struct bootinfo bootinfo; + +extern domid_t max_init_domid; + +void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len); + +size_t estimate_efi_size(int mem_nr_banks); + +void acpi_create_efi_system_table(struct domain *d, + struct membank tbl_add[]); + +void acpi_create_efi_mmap_table(struct domain *d, + const struct meminfo *mem, + struct membank tbl_add[]); + +int acpi_make_efi_nodes(void *fdt, struct membank tbl_add[]); + +void create_domUs(void); +void create_dom0(void); + +void discard_initial_modules(void); +void fw_unreserved_regions(paddr_t s, paddr_t e, + void (*cb)(paddr_t, paddr_t), int first); + +size_t boot_fdt_info(const void *fdt, paddr_t paddr); +const char *boot_fdt_cmdline(const void *fdt); + +struct bootmodule *add_boot_module(bootmodule_kind kind, + paddr_t start, paddr_t size, bool domU); +struct bootmodule *boot_module_find_by_kind(bootmodule_kind kind); +struct bootmodule * boot_module_find_by_addr_and_kind(bootmodule_kind kind, + paddr_t start); +void add_boot_cmdline(const char *name, const char *cmdline, + bootmodule_kind kind, paddr_t start, bool domU); +struct bootcmdline *boot_cmdline_find_by_kind(bootmodule_kind kind); +struct bootcmdline * boot_cmdline_find_by_name(const char *name); +const char *boot_module_kind_as_string(bootmodule_kind kind); + +extern uint32_t hyp_traps_vector[]; +void init_traps(void); + +void device_tree_get_reg(const __be32 **cell, u32 address_cells, + u32 size_cells, u64 *start, u64 *size); + +u32 device_tree_get_u32(const void *fdt, int node, + const char *prop_name, u32 dflt); + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/short-desc.h b/xen/arch/arm/include/asm/short-desc.h new file mode 100644 index 0000000000..9652a103c4 --- /dev/null +++ b/xen/arch/arm/include/asm/short-desc.h @@ -0,0 +1,130 @@ +#ifndef __ARM_SHORT_DESC_H__ +#define __ARM_SHORT_DESC_H__ + +/* + * First level translation table descriptor types used by the AArch32 + * short-descriptor translation table format. + */ +#define L1DESC_INVALID (0) +#define L1DESC_PAGE_TABLE (1) +#define L1DESC_SECTION (2) +#define L1DESC_SECTION_PXN (3) + +/* Defines for section and supersection shifts. */ +#define L1DESC_SECTION_SHIFT (20) +#define L1DESC_SUPERSECTION_SHIFT (24) +#define L1DESC_SUPERSECTION_EXT_BASE1_SHIFT (32) +#define L1DESC_SUPERSECTION_EXT_BASE2_SHIFT (36) + +/* Second level translation table descriptor types. 
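+ * Only the invalid encoding is given a name here; valid level 2 entries
+ * are distinguished instead by the page/lpage bits of the descriptor
+ * views further down in this header.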
*/ +#define L2DESC_INVALID (0) + +/* Defines for small (4K) and large page (64K) shifts. */ +#define L2DESC_SMALL_PAGE_SHIFT (12) +#define L2DESC_LARGE_PAGE_SHIFT (16) + +/* + * Comprises bits of the level 1 short-descriptor format representing + * a section. + */ +typedef struct __packed { + bool pxn:1; /* Privileged Execute Never */ + bool sec:1; /* == 1 if section or supersection */ + bool b:1; /* Bufferable */ + bool c:1; /* Cacheable */ + bool xn:1; /* Execute Never */ + unsigned int dom:4; /* Domain field */ + bool impl:1; /* Implementation defined */ + unsigned int ap:2; /* AP[1:0] */ + unsigned int tex:3; /* TEX[2:0] */ + bool ro:1; /* AP[2] */ + bool s:1; /* Shareable */ + bool ng:1; /* Non-global */ + bool supersec:1; /* Must be 0 for sections */ + bool ns:1; /* Non-secure */ + unsigned int base:12; /* Section base address */ +} short_desc_l1_sec_t; + +/* + * Comprises bits of the level 1 short-descriptor format representing + * a supersection. + */ +typedef struct __packed { + bool pxn:1; /* Privileged Execute Never */ + bool sec:1; /* == 1 if section or supersection */ + bool b:1; /* Bufferable */ + bool c:1; /* Cacheable */ + bool xn:1; /* Execute Never */ + unsigned int extbase2:4; /* Extended base address, PA[39:36] */ + bool impl:1; /* Implementation defined */ + unsigned int ap:2; /* AP[1:0] */ + unsigned int tex:3; /* TEX[2:0] */ + bool ro:1; /* AP[2] */ + bool s:1; /* Shareable */ + bool ng:1; /* Non-global */ + bool supersec:1; /* Must be 0 for sections */ + bool ns:1; /* Non-secure */ + unsigned int extbase1:4; /* Extended base address, PA[35:32] */ + unsigned int base:8; /* Supersection base address */ +} short_desc_l1_supersec_t; + +/* + * Comprises bits of the level 2 short-descriptor format representing + * a small page. + */ +typedef struct __packed { + bool xn:1; /* Execute Never */ + bool page:1; /* ==1 if small page */ + bool b:1; /* Bufferable */ + bool c:1; /* Cacheable */ + unsigned int ap:2; /* AP[1:0] */ + unsigned int tex:3; /* TEX[2:0] */ + bool ro:1; /* AP[2] */ + bool s:1; /* Shareable */ + bool ng:1; /* Non-global */ + unsigned int base:20; /* Small page base address */ +} short_desc_l2_page_t; + +/* + * Comprises bits of the level 2 short-descriptor format representing + * a large page. + */ +typedef struct __packed { + bool lpage:1; /* ==1 if large page */ + bool page:1; /* ==0 if large page */ + bool b:1; /* Bufferable */ + bool c:1; /* Cacheable */ + unsigned int ap:2; /* AP[1:0] */ + unsigned int sbz:3; /* Should be zero */ + bool ro:1; /* AP[2] */ + bool s:1; /* Shareable */ + bool ng:1; /* Non-global */ + unsigned int tex:3; /* TEX[2:0] */ + bool xn:1; /* Execute Never */ + unsigned int base:16; /* Large page base address */ +} short_desc_l2_lpage_t; + +/* + * Comprises the bits required to walk page tables adhering to the + * short-descriptor translation table format. + */ +typedef struct __packed { + unsigned int dt:2; /* Descriptor type */ + unsigned int pad1:8; + unsigned int base:22; /* Base address of block or next table */ +} short_desc_walk_t; + +/* + * Represents page table entries adhering to the short-descriptor translation + * table format. 
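+ *
+ * Illustrative use (not part of the original header): a walker checks
+ * the descriptor type first and then picks the matching view, e.g. for
+ * a section mapping:
+ *
+ *     short_desc_t desc = { .bits = l1_entry };
+ *     if ( desc.walk.dt == L1DESC_SECTION )
+ *         pa = ((paddr_t)desc.sec.base << L1DESC_SECTION_SHIFT) |
+ *              (va & ((1 << L1DESC_SECTION_SHIFT) - 1));
+ *
+ * where l1_entry stands for the raw 32-bit first-level entry for va.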
+ */
+typedef union {
+    uint32_t bits;
+    short_desc_walk_t walk;
+    short_desc_l1_sec_t sec;
+    short_desc_l1_supersec_t supersec;
+    short_desc_l2_page_t pg;
+    short_desc_l2_lpage_t lpg;
+} short_desc_t;
+
+#endif /* __ARM_SHORT_DESC_H__ */
diff --git a/xen/arch/arm/include/asm/smccc.h b/xen/arch/arm/include/asm/smccc.h
new file mode 100644
index 0000000000..9d94beb3df
--- /dev/null
+++ b/xen/arch/arm/include/asm/smccc.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARM_SMCCC_H__
+#define __ASM_ARM_SMCCC_H__
+
+#include
+#include
+
+#define SMCCC_VERSION_MAJOR_SHIFT  16
+#define SMCCC_VERSION_MINOR_MASK   \
+        ((1U << SMCCC_VERSION_MAJOR_SHIFT) - 1)
+#define SMCCC_VERSION_MAJOR_MASK   ~SMCCC_VERSION_MINOR_MASK
+#define SMCCC_VERSION_MAJOR(ver)   \
+        (((ver) & SMCCC_VERSION_MAJOR_MASK) >> SMCCC_VERSION_MAJOR_SHIFT)
+#define SMCCC_VERSION_MINOR(ver)   \
+        ((ver) & SMCCC_VERSION_MINOR_MASK)
+
+#define SMCCC_VERSION(major, minor) \
+        (((major) << SMCCC_VERSION_MAJOR_SHIFT) | (minor))
+
+#define ARM_SMCCC_VERSION_1_0   SMCCC_VERSION(1, 0)
+#define ARM_SMCCC_VERSION_1_1   SMCCC_VERSION(1, 1)
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ */
+
+#define ARM_SMCCC_STD_CALL      _AC(0,U)
+#define ARM_SMCCC_FAST_CALL     _AC(1,U)
+#define ARM_SMCCC_TYPE_SHIFT    31
+
+#define ARM_SMCCC_CONV_32       _AC(0,U)
+#define ARM_SMCCC_CONV_64       _AC(1,U)
+#define ARM_SMCCC_CONV_SHIFT    30
+
+#define ARM_SMCCC_OWNER_MASK    _AC(0x3F,U)
+#define ARM_SMCCC_OWNER_SHIFT   24
+
+#define ARM_SMCCC_FUNC_MASK     _AC(0xFFFF,U)
+
+#ifndef __ASSEMBLY__
+
+extern uint32_t smccc_ver;
+
+/* Check if this is a fast call. */
+static inline bool smccc_is_fast_call(register_t funcid)
+{
+    return funcid & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT);
+}
+
+/* Check if this is a 64-bit call. */
+static inline bool smccc_is_conv_64(register_t funcid)
+{
+    return funcid & (ARM_SMCCC_CONV_64 << ARM_SMCCC_CONV_SHIFT);
+}
+
+/* Get function number from function identifier. */
+static inline uint32_t smccc_get_fn(register_t funcid)
+{
+    return funcid & ARM_SMCCC_FUNC_MASK;
+}
+
+/* Get service owner number from function identifier. */
+static inline uint32_t smccc_get_owner(register_t funcid)
+{
+    return (funcid >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK;
+}
+
+/*
+ * struct arm_smccc_res - Result from SMC call
+ * @a0 - @a3 result values from registers 0 to 3
+ */
+struct arm_smccc_res {
+    unsigned long a0;
+    unsigned long a1;
+    unsigned long a2;
+    unsigned long a3;
+};
+
+/* SMCCC v1.1 implementation madness follows */
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...)
\ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ + register unsigned long r1 ASM_REG(1); \ + register unsigned long r2 ASM_REG(2); \ + register unsigned long r3 ASM_REG(3) + +#define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ + register unsigned long r1 ASM_REG(1) = __a1; \ + register unsigned long r2 ASM_REG(2); \ + register unsigned long r3 ASM_REG(3) + +#define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ + register unsigned long r1 ASM_REG(1) = __a1; \ + register unsigned long r2 ASM_REG(2) = __a2; \ + register unsigned long r3 ASM_REG(3) + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ + register unsigned long r1 ASM_REG(1) = __a1; \ + register unsigned long r2 ASM_REG(2) = __a2; \ + register unsigned long r3 ASM_REG(3) = __a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register unsigned long r4 ASM_REG(4) = __a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register typeof(a5) r5 ASM_REG(5) = __a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register typeof(a6) r6 ASM_REG(6) = __a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register typeof(a7) r7 ASM_REG(7) = __a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. 
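+ * A minimal use looks like this (illustrative, not part of the
+ * original header):
+ *
+ *     struct arm_smccc_res res;
+ *
+ *     arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FID, &res);
+ *
+ * after which res.a0 holds the implemented SMCCC version.
+ *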
+ * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + * + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define arm_smccc_1_1_smc(...) \ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile("smc #0\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if ( ___res ) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while ( 0 ) + +/* + * The calling convention for arm32 is the same for both SMCCC v1.0 and + * v1.1. + */ +#ifdef CONFIG_ARM_32 +#define arm_smccc_1_0_smc(...) arm_smccc_1_1_smc(__VA_ARGS__) +#define arm_smccc_smc(...) arm_smccc_1_1_smc(__VA_ARGS__) +#else + +void __arm_smccc_1_0_smc(register_t a0, register_t a1, register_t a2, + register_t a3, register_t a4, register_t a5, + register_t a6, register_t a7, + struct arm_smccc_res *res); + +/* Macros to handle variadic parameter for SMCCC v1.0 helper */ +#define __arm_smccc_1_0_smc_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + __arm_smccc_1_0_smc(a0, a1, a2, a3, a4, a5, a6, a7, res) + +#define __arm_smccc_1_0_smc_6(a0, a1, a2, a3, a4, a5, a6, res) \ + __arm_smccc_1_0_smc_7(a0, a1, a2, a3, a4, a5, a6, 0, res) + +#define __arm_smccc_1_0_smc_5(a0, a1, a2, a3, a4, a5, res) \ + __arm_smccc_1_0_smc_6(a0, a1, a2, a3, a4, a5, 0, res) + +#define __arm_smccc_1_0_smc_4(a0, a1, a2, a3, a4, res) \ + __arm_smccc_1_0_smc_5(a0, a1, a2, a3, a4, 0, res) + +#define __arm_smccc_1_0_smc_3(a0, a1, a2, a3, res) \ + __arm_smccc_1_0_smc_4(a0, a1, a2, a3, 0, res) + +#define __arm_smccc_1_0_smc_2(a0, a1, a2, res) \ + __arm_smccc_1_0_smc_3(a0, a1, a2, 0, res) + +#define __arm_smccc_1_0_smc_1(a0, a1, res) \ + __arm_smccc_1_0_smc_2(a0, a1, 0, res) + +#define __arm_smccc_1_0_smc_0(a0, res) \ + __arm_smccc_1_0_smc_1(a0, 0, res) + +#define ___arm_smccc_1_0_smc_count(count, ...) \ + __arm_smccc_1_0_smc_ ## count(__VA_ARGS__) + +#define __arm_smccc_1_0_smc_count(count, ...) \ + ___arm_smccc_1_0_smc_count(count, __VA_ARGS__) + +#define arm_smccc_1_0_smc(...) \ + __arm_smccc_1_0_smc_count(__count_args(__VA_ARGS__), __VA_ARGS__) + +#define arm_smccc_smc(...) \ + do { \ + if ( cpus_have_const_cap(ARM_SMCCC_1_1) ) \ + arm_smccc_1_1_smc(__VA_ARGS__); \ + else \ + arm_smccc_1_0_smc(__VA_ARGS__); \ + } while ( 0 ) +#endif /* CONFIG_ARM_64 */ + +#endif /* __ASSEMBLY__ */ + +/* + * Construct function identifier from call type (fast or standard), + * calling convention (32 or 64 bit), service owner and function number. 
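+ *
+ * Worked example (illustrative): PSCI_0_2_FN32(0), i.e. PSCI_VERSION,
+ * expands to ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32,
+ * ARM_SMCCC_OWNER_STANDARD, 0), which encodes to
+ * (1U << 31) | (0 << 30) | (4 << 24) | 0 == 0x84000000, the function
+ * identifier given for PSCI_VERSION in the PSCI specification.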
+ */
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num)  \
+        (((type) << ARM_SMCCC_TYPE_SHIFT) |                            \
+         ((calling_convention) << ARM_SMCCC_CONV_SHIFT) |              \
+         (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+         (func_num))
+
+/* List of known service owners */
+#define ARM_SMCCC_OWNER_ARCH            0
+#define ARM_SMCCC_OWNER_CPU             1
+#define ARM_SMCCC_OWNER_SIP             2
+#define ARM_SMCCC_OWNER_OEM             3
+#define ARM_SMCCC_OWNER_STANDARD        4
+#define ARM_SMCCC_OWNER_HYPERVISOR      5
+#define ARM_SMCCC_OWNER_TRUSTED_APP     48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
+#define ARM_SMCCC_OWNER_TRUSTED_OS      50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END  63
+
+/* List of generic function numbers */
+#define ARM_SMCCC_CALL_COUNT_FID(owner)             \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_##owner, \
+                           0xFF00)
+
+#define ARM_SMCCC_CALL_UID_FID(owner)               \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_##owner, \
+                           0xFF01)
+
+#define ARM_SMCCC_REVISION_FID(owner)               \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_##owner, \
+                           0xFF03)
+
+#define ARM_SMCCC_VERSION_FID                       \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_ARCH,    \
+                           0x0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FID                 \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_ARCH,    \
+                           0x1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1_FID             \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_ARCH,    \
+                           0x8000)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_2_FID             \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,     \
+                           ARM_SMCCC_CONV_32,       \
+                           ARM_SMCCC_OWNER_ARCH,    \
+                           0x7FFF)
+
+/* SMCCC error codes */
+#define ARM_SMCCC_NOT_REQUIRED          (-2)
+#define ARM_SMCCC_ERR_UNKNOWN_FUNCTION  (-1)
+#define ARM_SMCCC_NOT_SUPPORTED         (-1)
+#define ARM_SMCCC_SUCCESS               (0)
+
+/* SMCCC function identifier range which is reserved for existing APIs */
+#define ARM_SMCCC_RESERVED_RANGE_START  0x0
+#define ARM_SMCCC_RESERVED_RANGE_END    0x0100FFFF
+
+#endif /* __ASM_ARM_SMCCC_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/smp.h b/xen/arch/arm/include/asm/smp.h
new file mode 100644
index 0000000000..af5a2fe652
--- /dev/null
+++ b/xen/arch/arm/include/asm/smp.h
@@ -0,0 +1,46 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#endif
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#define smp_processor_id() get_processor_id()
+
+/*
+ * Do we, for platform reasons, need to actually keep CPUs online when we
+ * would otherwise prefer them to be off?
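+ * No Arm platform currently requires this, hence the hard-coded false
+ * below.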
+ */ +#define park_offline_cpus false + +extern void noreturn stop_cpu(void); + +extern int arch_smp_init(void); +extern int arch_cpu_init(int cpu, struct dt_device_node *dn); +extern int arch_cpu_up(int cpu); + +int cpu_up_send_sgi(int cpu); + +/* Secondary CPU entry point */ +extern void init_secondary(void); + +extern void smp_init_cpus(void); +extern void smp_clear_cpu_maps (void); +extern int smp_get_max_cpus (void); +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/softirq.h b/xen/arch/arm/include/asm/softirq.h new file mode 100644 index 0000000000..976e0ebd70 --- /dev/null +++ b/xen/arch/arm/include/asm/softirq.h @@ -0,0 +1,16 @@ +#ifndef __ASM_SOFTIRQ_H__ +#define __ASM_SOFTIRQ_H__ + +#define NR_ARCH_SOFTIRQS 0 + +#define arch_skip_send_event_check(cpu) 0 + +#endif /* __ASM_SOFTIRQ_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/spinlock.h b/xen/arch/arm/include/asm/spinlock.h new file mode 100644 index 0000000000..42b0f584fe --- /dev/null +++ b/xen/arch/arm/include/asm/spinlock.h @@ -0,0 +1,15 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#define arch_lock_acquire_barrier() smp_mb() +#define arch_lock_release_barrier() smp_mb() + +#define arch_lock_relax() wfe() +#define arch_lock_signal() do { \ + dsb(ishst); \ + sev(); \ +} while(0) + +#define arch_lock_signal_wmb() arch_lock_signal() + +#endif /* __ASM_SPINLOCK_H */ diff --git a/xen/arch/arm/include/asm/string.h b/xen/arch/arm/include/asm/string.h new file mode 100644 index 0000000000..b485e49044 --- /dev/null +++ b/xen/arch/arm/include/asm/string.h @@ -0,0 +1,53 @@ +#ifndef __ARM_STRING_H__ +#define __ARM_STRING_H__ + + +/* + * We don't do inline string functions, since the + * optimised inline asm versions are not small. 
+ */ + +#define __HAVE_ARCH_STRRCHR +#define __HAVE_ARCH_STRCHR +#if defined(CONFIG_ARM_64) +#define __HAVE_ARCH_STRCMP +#define __HAVE_ARCH_STRNCMP +#define __HAVE_ARCH_STRLEN +#define __HAVE_ARCH_STRNLEN +#endif + +#define __HAVE_ARCH_MEMCPY +#if defined(CONFIG_ARM_64) +#define __HAVE_ARCH_MEMCMP +#endif +#define __HAVE_ARCH_MEMMOVE +#define __HAVE_ARCH_MEMSET +#define __HAVE_ARCH_MEMCHR + +#if defined(CONFIG_ARM_32) + +void __memzero(void *ptr, size_t n); + +#define memset(p, v, n) \ + ({ \ + void *__p = (p); size_t __n = n; \ + if ((__n) != 0) { \ + if (__builtin_constant_p((v)) && (v) == 0) \ + __memzero((__p),(__n)); \ + else \ + memset((__p),(v),(__n)); \ + } \ + (__p); \ + }) + +#endif + +#endif /* __ARM_STRING_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/sysregs.h b/xen/arch/arm/include/asm/sysregs.h new file mode 100644 index 0000000000..5c5c51bbcd --- /dev/null +++ b/xen/arch/arm/include/asm/sysregs.h @@ -0,0 +1,22 @@ +#ifndef __ASM_ARM_SYSREGS_H +#define __ASM_ARM_SYSREGS_H + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +#endif /* __ASM_ARM_SYSREGS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ + + diff --git a/xen/arch/arm/include/asm/system.h b/xen/arch/arm/include/asm/system.h new file mode 100644 index 0000000000..65d5c8e423 --- /dev/null +++ b/xen/arch/arm/include/asm/system.h @@ -0,0 +1,73 @@ +/* Portions taken from Linux arch arm */ +#ifndef __ASM_SYSTEM_H +#define __ASM_SYSTEM_H + +#include +#include + +#define sev() asm volatile("sev" : : : "memory") +#define wfe() asm volatile("wfe" : : : "memory") +#define wfi() asm volatile("wfi" : : : "memory") + +#define isb() asm volatile("isb" : : : "memory") +#define dsb(scope) asm volatile("dsb " #scope : : : "memory") +#define dmb(scope) asm volatile("dmb " #scope : : : "memory") + +#define mb() dsb(sy) +#ifdef CONFIG_ARM_64 +#define rmb() dsb(ld) +#else +#define rmb() dsb(sy) /* 32-bit has no ld variant. */ +#endif +#define wmb() dsb(st) + +#define smp_mb() dmb(ish) +#ifdef CONFIG_ARM_64 +#define smp_rmb() dmb(ishld) +#else +#define smp_rmb() dmb(ish) /* 32-bit has no ishld variant. */ +#endif + +#define smp_wmb() dmb(ishst) + +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences. Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. 
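+ * An illustrative arm32 use (not from the original header):
+ *
+ *     asm volatile(__asmeq("%0", "r0")
+ *                  "mov %0, #0"
+ *                  : "=r" (val));
+ *
+ * which stops the build if the compiler picked anything other than r0
+ * for operand %0.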
+ * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "unknown ARM variant" +#endif + +static inline int local_abort_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_ABT_MASK); +} + +#define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v) + +extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next); + +#endif +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/tee/optee_msg.h b/xen/arch/arm/include/asm/tee/optee_msg.h new file mode 100644 index 0000000000..fe743dbde3 --- /dev/null +++ b/xen/arch/arm/include/asm/tee/optee_msg.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (c) 2015-2017, Linaro Limited + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include +#include + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8, UL) + +/* + * Pointer to a list of pages used to register user-defined SHM buffer. + * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*. + * buf_ptr should point to the beginning of the buffer. Buffer will contain + * list of page addresses. OP-TEE core can reconstruct contiguous buffer from + * that page addresses list. Page addresses are stored as 64 bit values. + * Last entry on a page should point to the next page of buffer. + * Every entry in buffer should point to a 4k page beginning (12 least + * significant bits must be equal to zero). + * + * 12 least significant of optee_msg_param.u.tmem.buf_ptr should hold page + * offset of user buffer. + * + * So, entries should be placed like members of this structure: + * + * struct page_data { + * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1]; + * uint64_t next_page_data; + * }; + * + * Structure is designed to exactly fit into the page size + * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page. + * + * The size of 4KB is chosen because this is the smallest page size for ARM + * architectures. If REE uses larger pages, it should divide them to 4KB ones. + */ +#define OPTEE_MSG_ATTR_NONCONTIG BIT(9, UL) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. 
If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. + */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/* + * Page size used in non-contiguous buffer entries + */ +#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096 + +#ifndef ASM +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + uint64_t buf_ptr; + uint64_t size; + uint64_t shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + uint64_t offs; + uint64_t size; + uint64_t shm_ref; +}; + +/** + * struct optee_msg_param_value - values + * @a: first value + * @b: second value + * @c: third value + */ +struct optee_msg_param_value { + uint64_t a; + uint64_t b; + uint64_t c; +}; + +/** + * struct optee_msg_param - parameter + * @attr: attributes + * @memref: a memory reference + * @value: a value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + uint64_t attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these fields hold it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta have to come first. 
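+ *
+ * As an illustration (not part of the original header), a caller
+ * allocating an argument buffer for a two-parameter call would size it
+ * with the helper defined further down:
+ *
+ *     struct optee_msg_arg *arg = alloc_shm(OPTEE_MSG_GET_ARG_SIZE(2));
+ *     arg->num_params = 2;
+ *
+ * where alloc_shm() is a hypothetical stand-in for however the caller
+ * obtains shared memory.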
+ */
+struct optee_msg_arg {
+    uint32_t cmd;
+    uint32_t func;
+    uint32_t session;
+    uint32_t cancel_id;
+    uint32_t pad;
+    uint32_t ret;
+    uint32_t ret_origin;
+    uint32_t num_params;
+
+    /* num_params tells the actual number of element in params */
+    struct optee_msg_param params[];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+    (sizeof(struct optee_msg_arg) + \
+     sizeof(struct optee_msg_param) * (num_params))
+
+/*
+ * Defines the maximum value of @num_params that can be passed to
+ * OPTEE_MSG_GET_ARG_SIZE without a risk of crossing page boundary.
+ */
+#define OPTEE_MSG_MAX_NUM_PARAMS \
+    ((OPTEE_MSG_NONCONTIG_PAGE_SIZE - sizeof(struct optee_msg_arg)) / \
+     sizeof(struct optee_msg_param))
+
+#endif /*ASM*/
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ */
+#define OPTEE_MSG_UID_0                 0x384fb3e0
+#define OPTEE_MSG_UID_1                 0xe7f811e3
+#define OPTEE_MSG_UID_2                 0xaf630002
+#define OPTEE_MSG_UID_3                 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_CALLS_UID      0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR        2
+#define OPTEE_MSG_REVISION_MINOR        0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0       0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1       0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2       0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3       0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID    0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
+ * session to a Trusted Application.
struct optee_msg_arg::func is the Trusted
+ * Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr           OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ *                              [| OPTEE_MSG_ATTR_NONCONTIG]
+ * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [in] param[0].u.tmem.size    size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref holds shared memory reference
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr           OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref holds shared memory reference
+ * [in] param[0].u.rmem.offs    0
+ * [in] param[0].u.rmem.size    0
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION      0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND    1
+#define OPTEE_MSG_CMD_CLOSE_SESSION     2
+#define OPTEE_MSG_CMD_CANCEL            3
+#define OPTEE_MSG_CMD_REGISTER_SHM      4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM    5
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG  0x0004
+
+#endif /* _OPTEE_MSG_H */
diff --git a/xen/arch/arm/include/asm/tee/optee_rpc_cmd.h b/xen/arch/arm/include/asm/tee/optee_rpc_cmd.h
new file mode 100644
index 0000000000..d6b9dfe30c
--- /dev/null
+++ b/xen/arch/arm/include/asm/tee/optee_rpc_cmd.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ */
+
+#ifndef __OPTEE_RPC_CMD_H
+#define __OPTEE_RPC_CMD_H
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::arg holds values defined by OPTEE_RPC_CMD_* below.
+ * Only the commands handled by the kernel driver are defined here.
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Load a TA into memory
+ *
+ * Since the size of the TA isn't known in advance, the size of the TA is
+ * first queried with a NULL buffer.
+ *
+ * [in]  value[0].a-b   UUID
+ * [out] memref[1]      Buffer with TA
+ */
+#define OPTEE_RPC_CMD_LOAD_TA       0
+
+/*
+ * Replay Protected Memory Block access
+ *
+ * [in]  memref[0]      Frames to device
+ * [out] memref[1]      Frames from device
+ */
+#define OPTEE_RPC_CMD_RPMB          1
+
+/*
+ * File system access, see definition of protocol below
+ */
+#define OPTEE_RPC_CMD_FS            2
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nano seconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] value[0].a     Number of seconds
+ * [out] value[0].b     Number of nano seconds.
+ */
+#define OPTEE_RPC_CMD_GET_TIME      3
+
+/*
+ * Wait queue primitive, helper for secure world to implement a wait queue.
+ *
+ * If secure world needs to wait for a secure world mutex it issues a sleep
+ * request instead of spinning in secure world. Conversely, a wakeup
+ * request is issued when a secure world mutex with a waiting thread is
+ * unlocked.
+ * + * Waiting on a key + * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP + * [in] value[0].b Wait key + * + * Waking up a key + * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP + * [in] value[0].b Wakeup key + */ +#define OPTEE_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] value[0].a Number of milliseconds to suspend + */ +#define OPTEE_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * [in] value[0].a Type of memory one of + * OPTEE_RPC_SHM_TYPE_* below + * [in] value[0].b Requested size + * [in] value[0].c Required alignment + * [out] memref[0] Buffer + */ +#define OPTEE_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_RPC_SHM_TYPE_KERNEL 1 +/* + * Memory shared with non-secure kernel and exported to a non-secure user + * space application + */ +#define OPTEE_RPC_SHM_TYPE_GLOBAL 2 + +/* + * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC + * + * [in] value[0].a Type of memory one of + * OPTEE_RPC_SHM_TYPE_* above + * [in] value[0].b Value of shared memory reference or cookie + */ +#define OPTEE_RPC_CMD_SHM_FREE 7 + +/* Was OPTEE_RPC_CMD_SQL_FS, which isn't supported any longer */ +#define OPTEE_RPC_CMD_SQL_FS_RESERVED 8 + +/* + * Send TA profiling information to normal world + * + * [in/out] value[0].a File identifier. Must be set to 0 on + * first call. A value >= 1 will be + * returned on success. Re-use this value + * to append data to the same file. + * [in] memref[1] TA UUID + * [in] memref[2] Profile data + */ +#define OPTEE_RPC_CMD_GPROF 9 + +/* + * Socket command, see definition of protocol below + */ +#define OPTEE_RPC_CMD_SOCKET 10 + +/* + * Register timestamp buffer in the linux kernel optee driver + * + * [in] value[0].a Subcommand (register buffer, unregister buffer) + * [in] value[0].b Physical address of timestamp buffer + * [in] value[0].c Size of buffer + */ +#define OPTEE_RPC_CMD_BENCH_REG 20 + +/* + * Definition of protocol for command OPTEE_RPC_CMD_FS + */ + +/* + * Open a file + * + * [in] value[0].a OPTEE_RPC_FS_OPEN + * [in] memref[1] A string holding the file name + * [out] value[2].a File descriptor of open file + */ +#define OPTEE_RPC_FS_OPEN 0 + +/* + * Create a file + * + * [in] value[0].a OPTEE_RPC_FS_CREATE + * [in] memref[1] A string holding the file name + * [out] value[2].a File descriptor of open file + */ +#define OPTEE_RPC_FS_CREATE 1 + +/* + * Close a file + * + * [in] value[0].a OPTEE_RPC_FS_CLOSE + * [in] value[0].b File descriptor of open file. + */ +#define OPTEE_RPC_FS_CLOSE 2 + +/* + * Read from a file + * + * [in] value[0].a OPTEE_RPC_FS_READ + * [in] value[0].b File descriptor of open file + * [in] value[0].c Offset into file + * [out] memref[1] Buffer to hold returned data + */ +#define OPTEE_RPC_FS_READ 3 + +/* + * Write to a file + * + * [in] value[0].a OPTEE_RPC_FS_WRITE + * [in] value[0].b File descriptor of open file + * [in] value[0].c Offset into file + * [in] memref[1] Buffer holding data to be written + */ +#define OPTEE_RPC_FS_WRITE 4 + +/* + * Truncate a file + * + * [in] value[0].a OPTEE_RPC_FS_TRUNCATE + * [in] value[0].b File descriptor of open file + * [in] value[0].c Length of file. 
+ */ +#define OPTEE_RPC_FS_TRUNCATE 5 + +/* + * Remove a file + * + * [in] value[0].a OPTEE_RPC_FS_REMOVE + * [in] memref[1] A string holding the file name + */ +#define OPTEE_RPC_FS_REMOVE 6 + +/* + * Rename a file + * + * [in] value[0].a OPTEE_RPC_FS_RENAME + * [in] value[0].b True if existing target should be removed + * [in] memref[1] A string holding the old file name + * [in] memref[2] A string holding the new file name + */ +#define OPTEE_RPC_FS_RENAME 7 + +/* + * Opens a directory for file listing + * + * [in] value[0].a OPTEE_RPC_FS_OPENDIR + * [in] memref[1] A string holding the name of the directory + * [out] value[2].a Handle to open directory + */ +#define OPTEE_RPC_FS_OPENDIR 8 + +/* + * Closes a directory handle + * + * [in] value[0].a OPTEE_RPC_FS_CLOSEDIR + * [in] value[0].b Handle to open directory + */ +#define OPTEE_RPC_FS_CLOSEDIR 9 + +/* + * Read next file name of directory + * + * + * [in] value[0].a OPTEE_RPC_FS_READDIR + * [in] value[0].b Handle to open directory + * [out] memref[1] A string holding the file name + */ +#define OPTEE_RPC_FS_READDIR 10 + +/* End of definition of protocol for command OPTEE_RPC_CMD_FS */ + +/* + * Definition of protocol for command OPTEE_RPC_CMD_SOCKET + */ + +#define OPTEE_RPC_SOCKET_TIMEOUT_NONBLOCKING 0 +#define OPTEE_RPC_SOCKET_TIMEOUT_BLOCKING 0xffffffff + +/* + * Open socket + * + * [in] value[0].a OPTEE_RPC_SOCKET_OPEN + * [in] value[0].b TA instance id + * [in] value[1].a Server port number + * [in] value[1].b Protocol, TEE_ISOCKET_PROTOCOLID_* + * [in] value[1].c Ip version TEE_IP_VERSION_* from tee_ipsocket.h + * [in] memref[2] Server address + * [out] value[3].a Socket handle (32-bit) + */ +#define OPTEE_RPC_SOCKET_OPEN 0 + +/* + * Close socket + * + * [in] value[0].a OPTEE_RPC_SOCKET_CLOSE + * [in] value[0].b TA instance id + * [in] value[0].c Socket handle + */ +#define OPTEE_RPC_SOCKET_CLOSE 1 + +/* + * Close all sockets + * + * [in] value[0].a OPTEE_RPC_SOCKET_CLOSE_ALL + * [in] value[0].b TA instance id + */ +#define OPTEE_RPC_SOCKET_CLOSE_ALL 2 + +/* + * Send data on socket + * + * [in] value[0].a OPTEE_RPC_SOCKET_SEND + * [in] value[0].b TA instance id + * [in] value[0].c Socket handle + * [in] memref[1] Buffer to transmit + * [in] value[2].a Timeout ms or OPTEE_RPC_SOCKET_TIMEOUT_* + * [out] value[2].b Number of transmitted bytes + */ +#define OPTEE_RPC_SOCKET_SEND 3 + +/* + * Receive data on socket + * + * [in] value[0].a OPTEE_RPC_SOCKET_RECV + * [in] value[0].b TA instance id + * [in] value[0].c Socket handle + * [out] memref[1] Buffer to receive + * [in] value[2].a Timeout ms or OPTEE_RPC_SOCKET_TIMEOUT_* + */ +#define OPTEE_RPC_SOCKET_RECV 4 + +/* + * Perform IOCTL on socket + * + * [in] value[0].a OPTEE_RPC_SOCKET_IOCTL + * [in] value[0].b TA instance id + * [in] value[0].c Socket handle + * [in/out] memref[1] Buffer + * [in] value[2].a Ioctl command + */ +#define OPTEE_RPC_SOCKET_IOCTL 5 + +/* End of definition of protocol for command OPTEE_RPC_CMD_SOCKET */ + +#endif /*__OPTEE_RPC_CMD_H*/ diff --git a/xen/arch/arm/include/asm/tee/optee_smc.h b/xen/arch/arm/include/asm/tee/optee_smc.h new file mode 100644 index 0000000000..2f5c702326 --- /dev/null +++ b/xen/arch/arm/include/asm/tee/optee_smc.h @@ -0,0 +1,567 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (c) 2015, Linaro Limited + */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +/* + * This file is exported by OP-TEE and is in kept in sync between secure + * world and normal world kernel driver. 
We're following ARM SMC Calling + * Convention as specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + * + * This file depends on optee_msg.h being included to expand the SMC id + * macros below. + */ + + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. + */ + +/* + * Function specified by SMC Calling convention + * + * Return the following UID if using API specified in this file + * without further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * see also OPTEE_MSG_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extensions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. May optionally return a 32-bit build identifier in a2, + * with zero meaning unspecified. 
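+ *
+ * For example (illustrative): an OP-TEE at revision 3.6 would return
+ * a0 == 3 and a1 == 6.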
+ */ +#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION +#define OPTEE_SMC_CALL_GET_OS_REVISION \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) + +/* + * Call with struct optee_msg_arg as argument + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg + * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 Return value, OPTEE_SMC_RETURN_* + * a1-3 Not used + * a4-7 Preserved + * + * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage: + * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT + * a1-3 Preserved + * a4-7 Preserved + * + * RPC return register usage: + * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val) + * a1-2 RPC parameters + * a3-7 Resume information, must be preserved + * + * Possible return values: + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Call completed, result updated in + * the previously supplied struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded, + * try again later. + * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg + * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal + * world. + */ +#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG +#define OPTEE_SMC_CALL_WITH_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) + +/* + * Get Shared Memory Config + * + * Returns the Secure/Non-secure shared memory config. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of SHM + * a2 Size of of SHM + * a3 Cache settings of memory, as defined by the + * OPTEE_SMC_SHM_* values above + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7 +#define OPTEE_SMC_GET_SHM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG) + +/* + * Configures L2CC mutex + * + * Disables, enables usage of L2CC mutex. Returns or sets physical address + * of L2CC mutex. 
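+ * (Here L2CC is the outer L2 cache controller, e.g. a PL310; the mutex
+ * is assumed to serialise cache maintenance between the two worlds.)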
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_L2CC_MUTEX
+ * a1 OPTEE_SMC_L2CC_MUTEX_GET_ADDR Get physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_SET_ADDR Set physical address of mutex
+ * OPTEE_SMC_L2CC_MUTEX_ENABLE Enable usage of mutex
+ * OPTEE_SMC_L2CC_MUTEX_DISABLE Disable usage of mutex
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Preserved
+ * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, upper 32bit of a 64bit
+ * physical address of mutex
+ * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, lower 32bit of a 64bit
+ * physical address of mutex
+ * a4-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL Physical address not available
+ * OPTEE_SMC_RETURN_EBADADDR Bad supplied physical address
+ * OPTEE_SMC_RETURN_EBADCMD Unsupported value in a1
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_L2CC_MUTEX_GET_ADDR 0
+#define OPTEE_SMC_L2CC_MUTEX_SET_ADDR 1
+#define OPTEE_SMC_L2CC_MUTEX_ENABLE 2
+#define OPTEE_SMC_L2CC_MUTEX_DISABLE 3
+#define OPTEE_SMC_FUNCID_L2CC_MUTEX 8
+#define OPTEE_SMC_L2CC_MUTEX \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_L2CC_MUTEX)
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR (1 << 0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM (1 << 0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM (1 << 1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM (1 << 2)
+
+/* Secure world supports Shared Memory with a NULL reference */
+#define OPTEE_SMC_SEC_CAP_MEMREF_NULL (1 << 4)
+
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+/*
+ * Disables and empties the cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
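The drain loop just described might look like this (a hedged sketch; free_shm_cookie() is a hypothetical callback, and OPTEE_SMC_DISABLE_SHM_CACHE is the function ID defined right below):

    static void drain_shm_cache(void)
    {
        struct arm_smccc_res resp;

        for ( ; ; )
        {
            arm_smccc_smc(OPTEE_SMC_DISABLE_SHM_CACHE, &resp);
            if ( (uint32_t)resp.a0 != OPTEE_SMC_RETURN_OK )
                break;  /* ENOTAVAIL: cache empty; EBUSY: not idle */
            /* a1/a2 carry the 64-bit cookie of one object to free. */
            free_shm_cookie(((uint64_t)resp.a1 << 32) | (uint32_t)resp.a2);
        }
    }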
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32 bits of a 64-bit Shared memory cookie
+ * a2 Lower 32 bits of a 64-bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Release of secondary cores
+ *
+ * OP-TEE in secure world is in charge of the release process of secondary
+ * cores. The Rich OS issues this request to ask OP-TEE to boot up the
+ * secondary cores, go through the OP-TEE per-core initialization, and then
+ * switch to the Non-Secure world with the Rich OS provided entry address.
+ * The secondary cores enter Non-Secure world in SVC mode, with Thumb, FIQ,
+ * IRQ and Abort bits disabled.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_BOOT_SECONDARY
+ * a1 Index of secondary core to boot
+ * a2 Upper 32 bits of a 64-bit Non-Secure world entry physical address
+ * a3 Lower 32 bits of a 64-bit Non-Secure world entry physical address
+ * a4-7 Not used
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_EBADCMD Core index out of range
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_BOOT_SECONDARY 12
+#define OPTEE_SMC_BOOT_SECONDARY \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
+
+/*
+ * Inform OP-TEE about a new virtual machine
+ *
+ * Hypervisor issues this call during virtual machine (guest) creation.
+ * OP-TEE records the client ID of the new virtual machine and prepares
+ * to receive requests from it. This call is available only if OP-TEE
+ * was built with virtualization support.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_VM_CREATED
+ * a1 Hypervisor Client ID of newly created virtual machine
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register.
Must be 0, because only the
+ * hypervisor can issue this call
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL OP-TEE has no resources for
+ * another VM
+ * a1-7 Preserved
+ *
+ */
+#define OPTEE_SMC_FUNCID_VM_CREATED 13
+#define OPTEE_SMC_VM_CREATED \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_CREATED)
+
+/*
+ * Inform OP-TEE about shutdown of a virtual machine
+ *
+ * Hypervisor issues this call during virtual machine (guest) destruction.
+ * OP-TEE will clean up all resources associated with this VM. This call is
+ * available only if OP-TEE was built with virtualization support.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_VM_DESTROYED
+ * a1 Hypervisor Client ID of virtual machine being shut down
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register. Must be 0, because only the
+ * hypervisor can issue this call
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ */
+#define OPTEE_SMC_FUNCID_VM_DESTROYED 14
+#define OPTEE_SMC_VM_DESTROYED \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_DESTROYED)
+
+/*
+ * Query OP-TEE about number of supported threads
+ *
+ * Normal World OS or Hypervisor issues this call to find out how many
+ * threads OP-TEE supports. That is how many standard calls can be issued
+ * in parallel before OP-TEE returns OPTEE_SMC_RETURN_ETHREAD_LIMIT.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Number of threads
+ * a2-7 Preserved
+ *
+ * Error return:
+ * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15
+#define OPTEE_SMC_GET_THREAD_COUNT \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
+
+/*
+ * Resume from RPC (for example after processing a foreign interrupt)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
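A hedged sketch of classifying a standard call's a0 value on return, using the prefix/function encoding just defined (the OPTEE_SMC_RETURN_IS_RPC() predicate appears with the other return codes further below):

    static bool is_rpc_request(register_t a0, uint32_t *rpc_func)
    {
        uint32_t ret = (uint32_t)a0;

        if ( !OPTEE_SMC_RETURN_IS_RPC(ret) )
            return false;  /* a final OPTEE_SMC_RETURN_* value instead */

        /* e.g. the ALLOC/FREE/CMD function numbers defined below */
        *rpc_func = OPTEE_SMC_RETURN_GET_RPC_FUNC(ret);
        return true;
    }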
+ * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32 bits of 64-bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. + * a2 Lower 32 bits of 64-bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver a foreign interrupt in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 +#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) \
+ (((ret) != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) && \
+ ((((ret) & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == \
+ OPTEE_SMC_RETURN_RPC_PREFIX)))
+
+#endif /* OPTEE_SMC_H */
diff --git a/xen/arch/arm/include/asm/tee/tee.h b/xen/arch/arm/include/asm/tee/tee.h
new file mode 100644
index 0000000000..f483986385
--- /dev/null
+++ b/xen/arch/arm/include/asm/tee/tee.h
@@ -0,0 +1,112 @@
+/*
+ * xen/include/asm-arm/tee/tee.h
+ *
+ * Generic part of TEE mediator subsystem
+ *
+ * Volodymyr Babchuk
+ * Copyright (c) 2018 EPAM Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_TEE_TEE_H__
+#define __ARCH_ARM_TEE_TEE_H__
+
+#include
+#include
+
+#include
+
+#ifdef CONFIG_TEE
+
+struct tee_mediator_ops {
+ /*
+ * Probe for TEE. Should return true if a TEE is found and the
+ * mediator is initialized.
+ */
+ bool (*probe)(void);
+
+ /*
+ * Called during domain construction if the toolstack requests TEE
+ * support, so the mediator can inform the TEE about the new
+ * guest and create its own structures for the new domain.
+ */
+ int (*domain_init)(struct domain *d);
+
+ /*
+ * Called during domain destruction to relinquish resources used
+ * by the mediator itself. This function can return -ERESTART to
+ * indicate that it has not finished its work and should be called
+ * again.
+ */
+ int (*relinquish_resources)(struct domain *d);
+
+ /* Handle SMCCC call for the current domain. */
+ bool (*handle_call)(struct cpu_user_regs *regs);
+};
+
+struct tee_mediator_desc {
+ /* Printable name of the TEE. */
+ const char *name;
+
+ /* Mediator callbacks as described above. */
+ const struct tee_mediator_ops *ops;
+
+ /*
+ * ID of TEE. Corresponds to xen_arch_domainconfig.tee_type.
+ * Should be one of XEN_DOMCTL_CONFIG_TEE_xxx
+ */
+ uint16_t tee_type;
+};
+
+bool tee_handle_call(struct cpu_user_regs *regs);
+int tee_domain_init(struct domain *d, uint16_t tee_type);
+int tee_relinquish_resources(struct domain *d);
+uint16_t tee_get_type(void);
+
+#define REGISTER_TEE_MEDIATOR(_name, _namestr, _type, _ops) \
+static const struct tee_mediator_desc __tee_desc_##_name __used \
+__section(".teemediator.info") = { \
+ .name = _namestr, \
+ .ops = _ops, \
+ .tee_type = _type \
+}
+
+#else
+
+static inline bool tee_handle_call(struct cpu_user_regs *regs)
+{
+ return false;
+}
+
+static inline int tee_domain_init(struct domain *d, uint16_t tee_type)
+{
+ if ( likely(tee_type == XEN_DOMCTL_CONFIG_TEE_NONE) )
+ return 0;
+
+ return -ENODEV;
+}
+
+static inline int tee_relinquish_resources(struct domain *d)
+{
+ return 0;
+}
+
+static inline uint16_t tee_get_type(void)
+{
+ return XEN_DOMCTL_CONFIG_TEE_NONE;
+}
+
+#endif /* CONFIG_TEE */
+
+#endif /* __ARCH_ARM_TEE_TEE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/time.h b/xen/arch/arm/include/asm/time.h
new file mode 100644
index 0000000000..4b401c1110
--- /dev/null
+++ b/xen/arch/arm/include/asm/time.h
@@ -0,0 +1,118 @@
+#ifndef __ARM_TIME_H__
+#define __ARM_TIME_H__
+
+#include
+#include
+#include
+
+#define DT_MATCH_TIMER \
+ DT_MATCH_COMPATIBLE("arm,armv7-timer"), \
+ DT_MATCH_COMPATIBLE("arm,armv8-timer")
+
+typedef uint64_t cycles_t;
+
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ */
+#if defined(CONFIG_ARM_64)
+#define read_cntpct_enforce_ordering(val) do { \
+ uint64_t tmp, _val = (val); \
+ \
+ asm volatile( \
+ "eor %0, %1, %1\n" \
+ "add %0, sp, %0\n" \
+ "ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+#else
+#define read_cntpct_enforce_ordering(val) do {} while (0)
+#endif
+
+static inline cycles_t read_cntpct_stable(void)
+{
+ /*
+ * ARM_WORKAROUND_858921: Cortex-A73 (all versions) counter read
+ * can return a wrong value when the counter crosses a 32bit boundary.
+ */
+ if ( !check_workaround_858921() )
+ return READ_SYSREG64(CNTPCT_EL0);
+ else
+ {
+ /*
+ * A recommended workaround for erratum 858921 is to:
+ * 1- Read CNTPCT twice.
+ * 2- Compare bit[32] of the two read values.
+ * - If bit[32] is different, keep the old value.
+ * - If bit[32] is the same, keep the new value.
+ */
+ cycles_t old, new;
+ old = READ_SYSREG64(CNTPCT_EL0);
+ new = READ_SYSREG64(CNTPCT_EL0);
+ return (((old ^ new) >> 32) & 1) ? old : new;
+ }
+}
+
+static inline cycles_t get_cycles(void)
+{
+ cycles_t cnt;
+
+ isb();
+ cnt = read_cntpct_stable();
+
+ /*
+ * Without a barrier here, if get_cycles() is ever used in a seqlock
+ * critical section, the counter read could be speculated past the
+ * seqlock check. To prevent the seqlock from being speculated
+ * silently, we add a barrier here defensively. An ISB would be
+ * enough, but read_cntpct_enforce_ordering() has a lower performance
+ * cost, so we prefer it here.
+ */
+ read_cntpct_enforce_ordering(cnt);
+
+ return cnt;
+}
+
+/* List of timer IRQs */
+enum timer_ppi
+{
+ TIMER_PHYS_SECURE_PPI = 0,
+ TIMER_PHYS_NONSECURE_PPI = 1,
+ TIMER_VIRT_PPI = 2,
+ TIMER_HYP_PPI = 3,
+ MAX_TIMER_PPI = 4,
+};
+
+/*
+ * Value of "clock-frequency" in the DT timer node if present.
+ * 0 means the property doesn't exist.
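Looking back at REGISTER_TEE_MEDIATOR in tee.h above, a hypothetical mediator (all names invented for illustration; a real one would pass its own XEN_DOMCTL_CONFIG_TEE_* ID) would plug into the framework roughly like this:

    static bool noop_probe(void)
    {
        return false;  /* no TEE found in this sketch */
    }

    static int noop_domain_init(struct domain *d)
    {
        return 0;
    }

    static int noop_relinquish_resources(struct domain *d)
    {
        return 0;
    }

    static bool noop_handle_call(struct cpu_user_regs *regs)
    {
        return false;  /* not an SMC we handle */
    }

    static const struct tee_mediator_ops noop_ops = {
        .probe = noop_probe,
        .domain_init = noop_domain_init,
        .relinquish_resources = noop_relinquish_resources,
        .handle_call = noop_handle_call,
    };

    REGISTER_TEE_MEDIATOR(noop, "NOOP", XEN_DOMCTL_CONFIG_TEE_NONE, &noop_ops);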
+ */ +extern uint32_t timer_dt_clock_frequency; + +/* Get one of the timer IRQ number */ +unsigned int timer_get_irq(enum timer_ppi ppi); + +/* Set up the timer interrupt on this CPU */ +extern void init_timer_interrupt(void); + +/* Counter value at boot time */ +extern uint64_t boot_count; + +extern s_time_t ticks_to_ns(uint64_t ticks); +extern uint64_t ns_to_ticks(s_time_t ns); + +void preinit_xen_time(void); + +void force_update_vcpu_system_time(struct vcpu *v); + +#endif /* __ARM_TIME_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/trace.h b/xen/arch/arm/include/asm/trace.h new file mode 100644 index 0000000000..e06def61f6 --- /dev/null +++ b/xen/arch/arm/include/asm/trace.h @@ -0,0 +1,12 @@ +#ifndef __ASM_TRACE_H__ +#define __ASM_TRACE_H__ + +#endif /* __ASM_TRACE_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/traps.h b/xen/arch/arm/include/asm/traps.h new file mode 100644 index 0000000000..2ed2b85c6f --- /dev/null +++ b/xen/arch/arm/include/asm/traps.h @@ -0,0 +1,121 @@ +#ifndef __ASM_ARM_TRAPS__ +#define __ASM_ARM_TRAPS__ + +#include +#include + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#endif + +/* + * GUEST_BUG_ON is intended for checking that the guest state has not been + * corrupted in hardware and/or that the hardware behaves as we + * believe it should (i.e. that certain traps can only occur when the + * guest is in a particular mode). + * + * The intention is to limit the damage such h/w bugs (or spec + * misunderstandings) can do by turning them into Denial of Service + * attacks instead of e.g. information leaks or privilege escalations. + * + * GUEST_BUG_ON *MUST* *NOT* be used to check for guest controllable state! + * + * Compared with regular BUG_ON it dumps the guest vcpu state instead + * of Xen's state. + */ +#define guest_bug_on_failed(p) \ +do { \ + show_execution_state(guest_cpu_user_regs()); \ + panic("Guest Bug: %pv: '%s', line %d, file %s\n", \ + current, p, __LINE__, __FILE__); \ +} while (0) +#define GUEST_BUG_ON(p) \ + do { if ( unlikely(p) ) guest_bug_on_failed(#p); } while (0) + +int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr); + +void advance_pc(struct cpu_user_regs *regs, const union hsr hsr); + +void inject_undef_exception(struct cpu_user_regs *regs, const union hsr hsr); + +/* read as zero and write ignore */ +void handle_raz_wi(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el); + +/* write only as write ignore */ +void handle_wo_wi(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el); + +/* read only as read as zero */ +void handle_ro_raz(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el); + +/* Read only as value provided with 'val' argument */ +void handle_ro_read_val(struct cpu_user_regs *regs, int regidx, bool read, + const union hsr hsr, int min_el, register_t val); + +/* Co-processor registers emulation (see arch/arm/vcpreg.c). 
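As a usage note for GUEST_BUG_ON above, a hedged sketch modeled on arch/arm/traps.c (psr_mode_is_32bit() from asm/regs.h assumed): the predicate captures a hardware/spec expectation, never guest-controllable state, so a failure is contained as a guest crash rather than trusted:

    static void handle_hypothetical_cp_trap(struct cpu_user_regs *regs,
                                            const union hsr hsr)
    {
        /* Hardware should only deliver this trap from 32-bit guest mode. */
        GUEST_BUG_ON(!psr_mode_is_32bit(regs));

        /* ... emulate the access, then skip the trapped instruction ... */
        advance_pc(regs, hsr);
    }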
*/ +void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp14_64(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp14_dbg(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp10(struct cpu_user_regs *regs, const union hsr hsr); +void do_cp(struct cpu_user_regs *regs, const union hsr hsr); + +/* SMCCC handling */ +void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr); +void do_trap_hvc_smccc(struct cpu_user_regs *regs); + +int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc); + +void noreturn do_unexpected_trap(const char *msg, + const struct cpu_user_regs *regs); + +/* Functions for pending virtual abort checking window. */ +void abort_guest_exit_start(void); +void abort_guest_exit_end(void); + +static inline bool VABORT_GEN_BY_GUEST(const struct cpu_user_regs *regs) +{ + return ((unsigned long)abort_guest_exit_start == regs->pc) || + (unsigned long)abort_guest_exit_end == regs->pc; +} + +/* Check whether the sign extension is required and perform it */ +static inline register_t sign_extend(const struct hsr_dabt dabt, register_t r) +{ + uint8_t size = (1 << dabt.size) * 8; + + /* + * Sign extend if required. + * Note that we expect the read handler to have zeroed the bits + * outside the requested access size. + */ + if ( dabt.sign && (size < sizeof(register_t) * 8) && + (r & (1UL << (size - 1))) ) + { + /* + * We are relying on register_t using the same as + * an unsigned long in order to keep the 32-bit assembly + * code smaller. + */ + BUILD_BUG_ON(sizeof(register_t) != sizeof(unsigned long)); + r |= (~0UL) << size; + } + + return r; +} + +#endif /* __ASM_ARM_TRAPS__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ + diff --git a/xen/arch/arm/include/asm/types.h b/xen/arch/arm/include/asm/types.h new file mode 100644 index 0000000000..083acbd151 --- /dev/null +++ b/xen/arch/arm/include/asm/types.h @@ -0,0 +1,80 @@ +#ifndef __ARM_TYPES_H__ +#define __ARM_TYPES_H__ + +#ifndef __ASSEMBLY__ + + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +#if defined(CONFIG_ARM_32) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#elif defined (CONFIG_ARM_64) +typedef __signed__ long __s64; +typedef unsigned long __u64; +#endif +#endif + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +#if defined(CONFIG_ARM_32) +typedef signed long long s64; +typedef unsigned long long u64; +typedef u32 vaddr_t; +#define PRIvaddr PRIx32 +typedef u64 paddr_t; +#define INVALID_PADDR (~0ULL) +#define PRIpaddr "016llx" +typedef u32 register_t; +#define PRIregister "08x" +#elif defined (CONFIG_ARM_64) +typedef signed long s64; +typedef unsigned long u64; +typedef u64 vaddr_t; +#define PRIvaddr PRIx64 +typedef u64 paddr_t; +#define INVALID_PADDR (~0UL) +#define PRIpaddr "016lx" +typedef u64 register_t; +#define PRIregister "016lx" +#endif + +#if defined(__SIZE_TYPE__) +typedef __SIZE_TYPE__ size_t; +#else +typedef unsigned long size_t; +#endif +typedef signed long ssize_t; + +#if defined(__PTRDIFF_TYPE__) +typedef 
__PTRDIFF_TYPE__ ptrdiff_t; +#else +typedef signed long ptrdiff_t; +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_TYPES_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vfp.h b/xen/arch/arm/include/asm/vfp.h new file mode 100644 index 0000000000..142a91ef8b --- /dev/null +++ b/xen/arch/arm/include/asm/vfp.h @@ -0,0 +1,25 @@ +#ifndef _ASM_VFP_H +#define _ASM_VFP_H + +struct vcpu; + +#if defined(CONFIG_ARM_32) +# include +#elif defined(CONFIG_ARM_64) +# include +#else +# error "Unknown ARM variant" +#endif + +void vfp_save_state(struct vcpu *v); +void vfp_restore_state(struct vcpu *v); + +#endif /* _ASM_VFP_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vgic-emul.h b/xen/arch/arm/include/asm/vgic-emul.h new file mode 100644 index 0000000000..e52fbaa3ec --- /dev/null +++ b/xen/arch/arm/include/asm/vgic-emul.h @@ -0,0 +1,33 @@ +#ifndef __ASM_ARM_VGIC_EMUL_H__ +#define __ASM_ARM_VGIC_EMUL_H__ + +/* + * Helpers to create easily a case to match emulate a single register or + * a range of registers + */ + +#define VREG32(reg) reg ... reg + 3 +#define VREG64(reg) reg ... reg + 7 + +#define VRANGE32(start, end) start ... end + 3 +#define VRANGE64(start, end) start ... end + 7 + +/* + * 64 bits registers can be accessible using 32-bit and 64-bit unless + * stated otherwise (See 8.1.3 ARM IHI 0069A). + */ +static inline bool vgic_reg64_check_access(struct hsr_dabt dabt) +{ + return ( dabt.size == DABT_DOUBLE_WORD || dabt.size == DABT_WORD ); +} + +#endif /* __ASM_ARM_VGIC_EMUL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vgic.h b/xen/arch/arm/include/asm/vgic.h new file mode 100644 index 0000000000..ade427a808 --- /dev/null +++ b/xen/arch/arm/include/asm/vgic.h @@ -0,0 +1,383 @@ +/* + * ARM Virtual Generic Interrupt Controller support + * + * Ian Campbell + * Copyright (c) 2011 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARM_VGIC_H__ +#define __ASM_ARM_VGIC_H__ + +#ifdef CONFIG_NEW_VGIC +#include +#else + +#include +#include + +struct pending_irq +{ + /* + * The following two states track the lifecycle of the guest irq. + * However because we are not sure and we don't want to track + * whether an irq added to an LR register is PENDING or ACTIVE, the + * following states are just an approximation. + * + * GIC_IRQ_GUEST_QUEUED: the irq is asserted and queued for + * injection into the guest's LRs. + * + * GIC_IRQ_GUEST_VISIBLE: the irq has been added to an LR register, + * therefore the guest is aware of it. From the guest point of view + * the irq can be pending (if the guest has not acked the irq yet) + * or active (after acking the irq). 
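As an illustration of the VREG32()/VRANGE32() case helpers from vgic-emul.h above, a hedged sketch of an MMIO read handler (signature and GICD_* offsets as in the existing vgic code, simplified here):

    static int hypothetical_distr_read(struct vcpu *v, mmio_info_t *info,
                                       register_t *r, void *priv)
    {
        uint32_t gicd_reg = info->gpa & 0xfff;  /* assumption: 4K frame */

        switch ( gicd_reg )
        {
        case VREG32(GICD_CTLR):                      /* one 32-bit register */
            *r = 0;
            return 1;
        case VRANGE32(GICD_IGROUPR, GICD_IGROUPRN):  /* whole range, RAZ */
            *r = 0;
            return 1;
        default:
            return 0;
        }
    }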
+ * + * In order for the state machine to be fully accurate, for level + * interrupts, we should keep the interrupt's pending state until + * the guest deactivates the irq. However because we are not sure + * when that happens, we instead track whether there is an interrupt + * queued using GIC_IRQ_GUEST_QUEUED. We clear it when we add it to + * an LR register. We set it when we receive another interrupt + * notification. Therefore it is possible to set + * GIC_IRQ_GUEST_QUEUED while the irq is GIC_IRQ_GUEST_VISIBLE. We + * could also change the state of the guest irq in the LR register + * from active to active and pending, but for simplicity we simply + * inject a second irq after the guest EOIs the first one. + * + * + * An additional state is used to keep track of whether the guest + * irq is enabled at the vgicd level: + * + * GIC_IRQ_GUEST_ENABLED: the guest IRQ is enabled at the VGICD + * level (GICD_ICENABLER/GICD_ISENABLER). + * + * GIC_IRQ_GUEST_MIGRATING: the irq is being migrated to a different + * vcpu while it is still inflight and on an GICH_LR register on the + * old vcpu. + * + * GIC_IRQ_GUEST_PRISTINE_LPI: the IRQ is a newly mapped LPI, which + * has never been in an LR before. This means that any trace of an + * LPI with the same number in an LR must be from an older LPI, which + * has been unmapped before. + * + */ +#define GIC_IRQ_GUEST_QUEUED 0 +#define GIC_IRQ_GUEST_ACTIVE 1 +#define GIC_IRQ_GUEST_VISIBLE 2 +#define GIC_IRQ_GUEST_ENABLED 3 +#define GIC_IRQ_GUEST_MIGRATING 4 +#define GIC_IRQ_GUEST_PRISTINE_LPI 5 + unsigned long status; + struct irq_desc *desc; /* only set if the irq corresponds to a physical irq */ + unsigned int irq; +#define GIC_INVALID_LR (uint8_t)~0 + uint8_t lr; + uint8_t priority; + uint8_t lpi_priority; /* Caches the priority if this is an LPI. */ + uint8_t lpi_vcpu_id; /* The VCPU for an LPI. */ + /* inflight is used to append instances of pending_irq to + * vgic.inflight_irqs */ + struct list_head inflight; + /* lr_queue is used to append instances of pending_irq to + * lr_pending. lr_pending is a per vcpu queue, therefore lr_queue + * accesses are protected with the vgic lock. + * TODO: when implementing irq migration, taking only the current + * vgic lock is not going to be enough. */ + struct list_head lr_queue; +}; + +#define NR_INTERRUPT_PER_RANK 32 +#define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1) + +/* Represents state corresponding to a block of 32 interrupts */ +struct vgic_irq_rank { + spinlock_t lock; /* Covers access to all other members of this struct */ + + uint8_t index; + + uint32_t ienable; + uint32_t icfg[2]; + + /* + * Provide efficient access to the priority of an vIRQ while keeping + * the emulation simple. + * Note, this is working fine as long as Xen is using little endian. + */ + union { + uint8_t priority[32]; + uint32_t ipriorityr[8]; + }; + + /* + * It's more convenient to store a target VCPU per vIRQ + * than the register ITARGETSR/IROUTER itself. + * Use atomic operations to read/write the vcpu fields to avoid + * taking the rank lock. + */ + uint8_t vcpu[32]; +}; + +struct vgic_dist { + /* Version of the vGIC */ + enum gic_version version; + /* GIC HW version specific vGIC driver handler */ + const struct vgic_ops *handler; + /* + * Covers access to other members of this struct _except_ for + * shared_irqs where each member contains its own locking. + * + * If both class of lock is required then this lock must be + * taken first. 
If multiple rank locks are required (including + * the per-vcpu private_irqs rank) then they must be taken in + * rank order. + */ + spinlock_t lock; + uint32_t ctlr; + int nr_spis; /* Number of SPIs */ + unsigned long *allocated_irqs; /* bitmap of IRQs allocated */ + struct vgic_irq_rank *shared_irqs; + /* + * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in + * struct arch_vcpu. + */ + struct pending_irq *pending_irqs; + /* Base address for guest GIC */ + paddr_t dbase; /* Distributor base address */ +#ifdef CONFIG_GICV3 + /* GIC V3 addressing */ + /* List of contiguous occupied by the redistributors */ + struct vgic_rdist_region { + paddr_t base; /* Base address */ + paddr_t size; /* Size */ + unsigned int first_cpu; /* First CPU handled */ + } *rdist_regions; + int nr_regions; /* Number of rdist regions */ + unsigned long int nr_lpis; + uint64_t rdist_propbase; + struct rb_root its_devices; /* Devices mapped to an ITS */ + spinlock_t its_devices_lock; /* Protects the its_devices tree */ + struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */ + rwlock_t pend_lpi_tree_lock; /* Protects the pend_lpi_tree */ + struct list_head vits_list; /* List of virtual ITSes */ + unsigned int intid_bits; + /* + * TODO: if there are more bool's being added below, consider + * a flags variable instead. + */ + bool rdists_enabled; /* Is any redistributor enabled? */ + bool has_its; +#endif +}; + +struct vgic_cpu { + /* + * SGIs and PPIs are per-VCPU, SPIs are domain global and in + * struct arch_domain. + */ + struct pending_irq pending_irqs[32]; + struct vgic_irq_rank *private_irqs; + + /* This list is ordered by IRQ priority and it is used to keep + * track of the IRQs that the VGIC injected into the guest. + * Depending on the availability of LR registers, the IRQs might + * actually be in an LR, and therefore injected into the guest, + * or queued in gic.lr_pending. + * As soon as an IRQ is EOI'd by the guest and removed from the + * corresponding LR it is also removed from this list. */ + struct list_head inflight_irqs; + /* lr_pending is used to queue IRQs (struct pending_irq) that the + * vgic tried to inject in the guest (calling gic_raise_guest_irq) but + * no LRs were available at the time. + * As soon as an LR is freed we remove the first IRQ from this + * list and write it to the LR register. + * lr_pending is a subset of vgic.inflight_irqs. 
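A hedged sketch of how the two queues above are typically walked (field and lock names as in the surrounding structures; the LR write itself is elided):

    static void queue_pending_to_lrs(struct vcpu *v)
    {
        struct pending_irq *p;

        ASSERT(spin_is_locked(&v->arch.vgic.lock));

        list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
        {
            if ( test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
                 !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            {
                /* ... place p in a free LR (or on lr_pending) and set
                 * GIC_IRQ_GUEST_VISIBLE ... */
            }
        }
    }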
*/
+ struct list_head lr_pending;
+ spinlock_t lock;
+
+ /* GICv3: redistributor base and flags for this vCPU */
+ paddr_t rdist_base;
+ uint64_t rdist_pendbase;
+#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
+#define VGIC_V3_LPIS_ENABLED (1 << 1)
+ uint8_t flags;
+};
+
+struct sgi_target {
+ uint8_t aff1;
+ uint16_t list;
+};
+
+static inline void sgi_target_init(struct sgi_target *sgi_target)
+{
+ sgi_target->aff1 = 0;
+ sgi_target->list = 0;
+}
+
+struct vgic_ops {
+ /* Initialize vGIC */
+ int (*vcpu_init)(struct vcpu *v);
+ /* Domain specific initialization of vGIC */
+ int (*domain_init)(struct domain *d);
+ /* Release resources that were allocated by domain_init */
+ void (*domain_free)(struct domain *d);
+ /* vGIC sysreg/cpregs emulate */
+ bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
+ /* lookup the struct pending_irq for a given LPI interrupt */
+ struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
+ int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
+};
+
+/* Number of ranks of interrupt registers for a domain */
+#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)
+
+#define vgic_lock(v) spin_lock_irq(&(v)->domain->arch.vgic.lock)
+#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
+
+#define vgic_lock_rank(v, r, flags) spin_lock_irqsave(&(r)->lock, flags)
+#define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
+
+/*
+ * Rank containing GICD_<FOO><n> for GICD_<FOO> with
+ * <b>-bits-per-interrupt
+ */
+static inline int REG_RANK_NR(int b, uint32_t n)
+{
+ switch ( b )
+ {
+ /*
+ * IRQ ranks are of size 32. So n cannot be shifted beyond 5 for 32
+ * and above. For 64-bit registers n is already shifted by the caller
+ * (see DABT_DOUBLE_WORD).
+ */
+ case 64:
+ case 32: return n >> 5;
+ case 16: return n >> 4;
+ case 8: return n >> 3;
+ case 4: return n >> 2;
+ case 2: return n >> 1;
+ case 1: return n;
+ default: BUG();
+ }
+}
+
+enum gic_sgi_mode;
+
+/*
+ * Offset of GICD_<FOO><n> within its rank, for GICD_<FOO> of size <s> with
+ * <b>-bits-per-interrupt.
+ */ +#define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32) + + +extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq); +extern void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p); +extern void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p); +extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq); +extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq); +extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq); +extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s); +extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq); +extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n); +extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n); +extern void vgic_set_irqs_pending(struct vcpu *v, uint32_t r, + unsigned int rank); +extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops); +int vgic_v2_init(struct domain *d, int *mmio_count); +int vgic_v3_init(struct domain *d, int *mmio_count); + +extern bool vgic_to_sgi(struct vcpu *v, register_t sgir, + enum gic_sgi_mode irqmode, int virq, + const struct sgi_target *target); +extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq); +extern void vgic_check_inflight_irqs_pending(struct domain *d, struct vcpu *v, + unsigned int rank, uint32_t r); + +#endif /* !CONFIG_NEW_VGIC */ + +/*** Common VGIC functions used by Xen arch code ****/ + +/* + * In the moment vgic_num_irqs() just covers SPIs and the private IRQs, + * as it's mostly used for allocating the pending_irq and irq_desc array, + * in which LPIs don't participate. + */ +#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32) + +/* + * Allocate a guest VIRQ + * - spi == 0 => allocate a PPI. It will be the same on every vCPU + * - spi == 1 => allocate an SPI + */ +extern int vgic_allocate_virq(struct domain *d, bool spi); +/* Reserve a specific guest vIRQ */ +extern bool vgic_reserve_virq(struct domain *d, unsigned int virq); +extern void vgic_free_virq(struct domain *d, unsigned int virq); + +static inline int vgic_allocate_ppi(struct domain *d) +{ + return vgic_allocate_virq(d, false /* ppi */); +} + +static inline int vgic_allocate_spi(struct domain *d) +{ + return vgic_allocate_virq(d, true /* spi */); +} + +struct irq_desc *vgic_get_hw_irq_desc(struct domain *d, struct vcpu *v, + unsigned int virq); +int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq, + struct irq_desc *desc, bool connect); + +bool vgic_evtchn_irq_pending(struct vcpu *v); + +int domain_vgic_register(struct domain *d, int *mmio_count); +int domain_vgic_init(struct domain *d, unsigned int nr_spis); +void domain_vgic_free(struct domain *d); +int vcpu_vgic_init(struct vcpu *vcpu); +int vcpu_vgic_free(struct vcpu *vcpu); + +void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq, + bool level); + +extern void vgic_clear_pending_irqs(struct vcpu *v); + +extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr); + +/* Maximum vCPUs for a specific vGIC version, or 0 for unsupported. 
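A hedged sketch of the vIRQ allocation API above (GUEST_EVTCHN_PPI from the public ABI headers is assumed as the example fixed PPI):

    static int setup_domain_virqs(struct domain *d)
    {
        int virq;

        /* Reserve a fixed, well-known PPI... */
        if ( !vgic_reserve_virq(d, GUEST_EVTCHN_PPI) )
            return -EBUSY;

        /* ...and allocate any free SPI for an emulated device. */
        virq = vgic_allocate_spi(d);
        if ( virq < 0 )
            return -ENOSPC;

        return virq;
    }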
*/ +unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version); + +void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize, + paddr_t vbase, uint32_t aliased_offset); + +#ifdef CONFIG_GICV3 +struct rdist_region; +void vgic_v3_setup_hw(paddr_t dbase, + unsigned int nr_rdist_regions, + const struct rdist_region *regions, + unsigned int intid_bits); +#endif + +void vgic_sync_to_lrs(void); +void vgic_sync_from_lrs(struct vcpu *v); + +int vgic_vcpu_pending_irq(struct vcpu *v); + +#endif /* __ASM_ARM_VGIC_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vm_event.h b/xen/arch/arm/include/asm/vm_event.h new file mode 100644 index 0000000000..abe7db1970 --- /dev/null +++ b/xen/arch/arm/include/asm/vm_event.h @@ -0,0 +1,67 @@ +/* + * vm_event.h: architecture specific vm_event handling routines + * + * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_ARM_VM_EVENT_H__ +#define __ASM_ARM_VM_EVENT_H__ + +#include +#include +#include + +static inline int vm_event_init_domain(struct domain *d) +{ + /* Nothing to do. */ + return 0; +} + +static inline void vm_event_cleanup_domain(struct domain *d) +{ + memset(&d->monitor, 0, sizeof(d->monitor)); +} + +static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v, + vm_event_response_t *rsp) +{ + /* Not supported on ARM. */ +} + +static inline +void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp) +{ + /* Not supported on ARM. */ +} + +static inline +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp) +{ + /* Not supported on ARM. */ +} + +static inline +void vm_event_sync_event(struct vcpu *v, bool value) +{ + /* Not supported on ARM. */ +} + +static inline +void vm_event_reset_vmtrace(struct vcpu *v) +{ + /* Not supported on ARM. */ +} + +#endif /* __ASM_ARM_VM_EVENT_H__ */ diff --git a/xen/arch/arm/include/asm/vpl011.h b/xen/arch/arm/include/asm/vpl011.h new file mode 100644 index 0000000000..e6c7ab7381 --- /dev/null +++ b/xen/arch/arm/include/asm/vpl011.h @@ -0,0 +1,89 @@ +/* + * include/xen/vpl011.h + * + * Virtual PL011 UART + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+ */ + +#ifndef _VPL011_H_ +#define _VPL011_H_ + +#include +#include +#include +#include + +/* helper macros */ +#define VPL011_LOCK(d,flags) spin_lock_irqsave(&(d)->arch.vpl011.lock, flags) +#define VPL011_UNLOCK(d,flags) spin_unlock_irqrestore(&(d)->arch.vpl011.lock, flags) + +#define SBSA_UART_FIFO_SIZE 32 +/* Same size as VUART_BUF_SIZE, used in vuart.c */ +#define SBSA_UART_OUT_BUF_SIZE 128 +struct vpl011_xen_backend { + char in[SBSA_UART_FIFO_SIZE]; + char out[SBSA_UART_OUT_BUF_SIZE]; + XENCONS_RING_IDX in_cons, in_prod; + XENCONS_RING_IDX out_prod; +}; + +struct vpl011 { + bool backend_in_domain; + union { + struct { + void *ring_buf; + struct page_info *ring_page; + } dom; + struct vpl011_xen_backend *xen; + } backend; + uint32_t uartfr; /* Flag register */ + uint32_t uartcr; /* Control register */ + uint32_t uartimsc; /* Interrupt mask register*/ + uint32_t uarticr; /* Interrupt clear register */ + uint32_t uartris; /* Raw interrupt status register */ + uint32_t shadow_uartmis; /* shadow masked interrupt register */ + spinlock_t lock; + evtchn_port_t evtchn; +}; + +struct vpl011_init_info { + domid_t console_domid; + gfn_t gfn; + evtchn_port_t evtchn; +}; + +#ifdef CONFIG_SBSA_VUART_CONSOLE +int domain_vpl011_init(struct domain *d, + struct vpl011_init_info *info); +void domain_vpl011_deinit(struct domain *d); +void vpl011_rx_char_xen(struct domain *d, char c); +#else +static inline int domain_vpl011_init(struct domain *d, + struct vpl011_init_info *info) +{ + return -ENOSYS; +} + +static inline void domain_vpl011_deinit(struct domain *d) { } +#endif +#endif /* _VPL011_H_ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vpsci.h b/xen/arch/arm/include/asm/vpsci.h new file mode 100644 index 0000000000..0cca5e6830 --- /dev/null +++ b/xen/arch/arm/include/asm/vpsci.h @@ -0,0 +1,42 @@ +/* + * xen/include/asm-arm/vpsci.h + * + * Julien Grall + * Copyright (c) 2018 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . 
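Stepping back to the vpl011 state above: all register fields are guarded by the embedded spinlock, for which the VPL011_LOCK/VPL011_UNLOCK helpers exist. A hedged accessor sketch (assuming the struct is embedded as d->arch.vpl011, as the lock macros imply):

    static uint32_t vpl011_read_uartfr(struct domain *d)
    {
        uint32_t val;
        unsigned long flags;

        VPL011_LOCK(d, flags);
        val = d->arch.vpl011.uartfr;
        VPL011_UNLOCK(d, flags);

        return val;
    }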
+ */ + +#ifndef __ASM_VPSCI_H__ +#define __ASM_VPSCI_H__ + +#include + +/* Number of function implemented by virtual PSCI (only 0.2 or later) */ +#define VPSCI_NR_FUNCS 12 + +/* Functions handle PSCI calls from the guests */ +bool do_vpsci_0_1_call(struct cpu_user_regs *regs, uint32_t fid); +bool do_vpsci_0_2_call(struct cpu_user_regs *regs, uint32_t fid); + +#endif /* __ASM_VPSCI_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/vreg.h b/xen/arch/arm/include/asm/vreg.h new file mode 100644 index 0000000000..fa2f4cdb17 --- /dev/null +++ b/xen/arch/arm/include/asm/vreg.h @@ -0,0 +1,196 @@ +/* + * Helpers to emulate co-processor and system registers + */ +#ifndef __ASM_ARM_VREG__ +#define __ASM_ARM_VREG__ + +typedef bool (*vreg_reg64_fn_t)(struct cpu_user_regs *regs, uint64_t *r, + bool read); +typedef bool (*vreg_reg_fn_t)(struct cpu_user_regs *regs, register_t *r, + bool read); + +static inline bool vreg_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr, + vreg_reg_fn_t fn) +{ + struct hsr_cp32 cp32 = hsr.cp32; + /* + * Initialize to zero to avoid leaking data if there is an + * implementation error in the emulation (such as not correctly + * setting r). + */ + register_t r = 0; + bool ret; + + if ( !cp32.read ) + r = get_user_reg(regs, cp32.reg); + + ret = fn(regs, &r, cp32.read); + + if ( ret && cp32.read ) + set_user_reg(regs, cp32.reg, r); + + return ret; +} + +static inline bool vreg_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr, + vreg_reg64_fn_t fn) +{ + struct hsr_cp64 cp64 = hsr.cp64; + /* + * Initialize to zero to avoid leaking data if there is an + * implementation error in the emulation (such as not correctly + * setting x). + */ + uint64_t x = 0; + bool ret; + + if ( !cp64.read ) + { + uint32_t r1 = get_user_reg(regs, cp64.reg1); + uint32_t r2 = get_user_reg(regs, cp64.reg2); + + x = (uint64_t)r1 | ((uint64_t)r2 << 32); + } + + ret = fn(regs, &x, cp64.read); + + if ( ret && cp64.read ) + { + set_user_reg(regs, cp64.reg1, x & 0xffffffff); + set_user_reg(regs, cp64.reg2, x >> 32); + } + + return ret; +} + +#ifdef CONFIG_ARM_64 +static inline bool vreg_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr, + vreg_reg_fn_t fn) +{ + struct hsr_sysreg sysreg = hsr.sysreg; + register_t r = 0; + bool ret; + + if ( !sysreg.read ) + r = get_user_reg(regs, sysreg.reg); + + ret = fn(regs, &r, sysreg.read); + + if ( ret && sysreg.read ) + set_user_reg(regs, sysreg.reg, r); + + return ret; +} +#endif + +#define VREG_REG_MASK(size) ((~0UL) >> (BITS_PER_LONG - ((1 << (size)) * 8))) + +/* + * The check on the size supported by the register has to be done by + * the caller of vreg_regN_*. + * + * vreg_reg_* should never be called directly. Instead use the vreg_regN_* + * according to size of the emulated register + * + * Note that the alignment fault will always be taken in the guest + * (see B3.12.7 DDI0406.b). 
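As a usage sketch for vreg_emulate_cp32() above: handlers receive the register value by pointer plus the access direction, so a read-as-zero/write-ignore register (hypothetical example) reduces to:

    static bool vreg_handle_raz_wi(struct cpu_user_regs *regs, register_t *r,
                                   bool read)
    {
        if ( read )
            *r = 0;   /* read as zero */

        return true;  /* writes are ignored; access handled either way */
    }

    /* in a cp15 trap handler:
     *     return vreg_emulate_cp32(regs, hsr, vreg_handle_raz_wi);
     */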
+ */ +static inline register_t vreg_reg_extract(unsigned long reg, + unsigned int offset, + enum dabt_size size) +{ + reg >>= 8 * offset; + reg &= VREG_REG_MASK(size); + + return reg; +} + +static inline void vreg_reg_update(unsigned long *reg, register_t val, + unsigned int offset, + enum dabt_size size) +{ + unsigned long mask = VREG_REG_MASK(size); + int shift = offset * 8; + + *reg &= ~(mask << shift); + *reg |= ((unsigned long)val & mask) << shift; +} + +static inline void vreg_reg_setbits(unsigned long *reg, register_t bits, + unsigned int offset, + enum dabt_size size) +{ + unsigned long mask = VREG_REG_MASK(size); + int shift = offset * 8; + + *reg |= ((unsigned long)bits & mask) << shift; +} + +static inline void vreg_reg_clearbits(unsigned long *reg, register_t bits, + unsigned int offset, + enum dabt_size size) +{ + unsigned long mask = VREG_REG_MASK(size); + int shift = offset * 8; + + *reg &= ~(((unsigned long)bits & mask) << shift); +} + +/* N-bit register helpers */ +#define VREG_REG_HELPERS(sz, offmask) \ +static inline register_t vreg_reg##sz##_extract(uint##sz##_t reg, \ + const mmio_info_t *info)\ +{ \ + return vreg_reg_extract(reg, info->gpa & offmask, \ + info->dabt.size); \ +} \ + \ +static inline void vreg_reg##sz##_update(uint##sz##_t *reg, \ + register_t val, \ + const mmio_info_t *info) \ +{ \ + unsigned long tmp = *reg; \ + \ + vreg_reg_update(&tmp, val, info->gpa & offmask, \ + info->dabt.size); \ + \ + *reg = tmp; \ +} \ + \ +static inline void vreg_reg##sz##_setbits(uint##sz##_t *reg, \ + register_t bits, \ + const mmio_info_t *info) \ +{ \ + unsigned long tmp = *reg; \ + \ + vreg_reg_setbits(&tmp, bits, info->gpa & offmask, \ + info->dabt.size); \ + \ + *reg = tmp; \ +} \ + \ +static inline void vreg_reg##sz##_clearbits(uint##sz##_t *reg, \ + register_t bits, \ + const mmio_info_t *info) \ +{ \ + unsigned long tmp = *reg; \ + \ + vreg_reg_clearbits(&tmp, bits, info->gpa & offmask, \ + info->dabt.size); \ + \ + *reg = tmp; \ +} + +/* + * 64 bits registers are only supported on platform with 64-bit long. + * This is also allow us to optimize the 32 bit case by using + * unsigned long rather than uint64_t + */ +#if BITS_PER_LONG == 64 +VREG_REG_HELPERS(64, 0x7); +#endif +VREG_REG_HELPERS(32, 0x3); + +#undef VREG_REG_HELPERS + +#endif /* __ASM_ARM_VREG__ */ diff --git a/xen/arch/arm/include/asm/vtimer.h b/xen/arch/arm/include/asm/vtimer.h new file mode 100644 index 0000000000..9d4fb4c6e8 --- /dev/null +++ b/xen/arch/arm/include/asm/vtimer.h @@ -0,0 +1,41 @@ +/* + * xen/arch/arm/vtimer.h + * + * ARM Virtual Timer emulation support + * + * Ian Campbell + * Copyright (c) 2011 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ARCH_ARM_VTIMER_H__ +#define __ARCH_ARM_VTIMER_H__ + +extern int domain_vtimer_init(struct domain *d, + struct xen_arch_domainconfig *config); +extern int vcpu_vtimer_init(struct vcpu *v); +extern bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr); +extern void virt_timer_save(struct vcpu *v); +extern void virt_timer_restore(struct vcpu *v); +extern void vcpu_timer_destroy(struct vcpu *v); +void vtimer_update_irqs(struct vcpu *v); + +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/include/asm/xenoprof.h b/xen/arch/arm/include/asm/xenoprof.h new file mode 100644 index 0000000000..3db6ce3ab2 --- /dev/null +++ b/xen/arch/arm/include/asm/xenoprof.h @@ -0,0 +1,12 @@ +#ifndef __ASM_XENOPROF_H__ +#define __ASM_XENOPROF_H__ + +#endif /* __ASM_XENOPROF_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index 60c0e82fc5..7bfd0a73a7 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -46,7 +46,7 @@ struct cpuinfo_arm cpu_data[NR_CPUS]; /* CPU logical map: map xen cpuid to an MPIDR */ register_t __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; -/* Fake one node for now. See also include/asm-arm/numa.h */ +/* Fake one node for now. See also asm/numa.h */ nodemask_t __read_mostly node_online_map = { { [0] = 1UL } }; /* Xen stack for bringing up the first CPU. */ diff --git a/xen/arch/arm/vpsci.c b/xen/arch/arm/vpsci.c index c1e250be59..744d43ec27 100644 --- a/xen/arch/arm/vpsci.c +++ b/xen/arch/arm/vpsci.c @@ -278,7 +278,7 @@ bool do_vpsci_0_1_call(struct cpu_user_regs *regs, uint32_t fid) bool do_vpsci_0_2_call(struct cpu_user_regs *regs, uint32_t fid) { /* - * /!\ VPSCI_NR_FUNCS (in asm-arm/vpsci.h) should be updated when + * /!\ VPSCI_NR_FUNCS (in asm/vpsci.h) should be updated when * adding/removing a function. SCCC_SMCCC_*_REVISION should be * updated once per release. 
*/ diff --git a/xen/arch/riscv/arch.mk b/xen/arch/riscv/arch.mk index 53dadb8975..39ae6ffea9 100644 --- a/xen/arch/riscv/arch.mk +++ b/xen/arch/riscv/arch.mk @@ -12,3 +12,4 @@ riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c CFLAGS += -march=$(riscv-march-y) -mstrict-align -mcmodel=medany CFLAGS += -I$(BASEDIR)/include +CFLAGS += -I$(BASEDIR)/arch/$(TARGET_ARCH)/include diff --git a/xen/arch/riscv/include/asm/config.h b/xen/arch/riscv/include/asm/config.h new file mode 100644 index 0000000000..e2ae21de61 --- /dev/null +++ b/xen/arch/riscv/include/asm/config.h @@ -0,0 +1,47 @@ +#ifndef __RISCV_CONFIG_H__ +#define __RISCV_CONFIG_H__ + +#if defined(CONFIG_RISCV_64) +# define LONG_BYTEORDER 3 +# define ELFSIZE 64 +# define MAX_VIRT_CPUS 128u +#else +# error "Unsupported RISCV variant" +#endif + +#define BYTES_PER_LONG (1 << LONG_BYTEORDER) +#define BITS_PER_LONG (BYTES_PER_LONG << 3) +#define POINTER_ALIGN BYTES_PER_LONG + +#define BITS_PER_LLONG 64 + +/* xen_ulong_t is always 64 bits */ +#define BITS_PER_XEN_ULONG 64 + +#define CONFIG_RISCV_L1_CACHE_SHIFT 6 +#define CONFIG_PAGEALLOC_MAX_ORDER 18 +#define CONFIG_DOMU_MAX_ORDER 9 +#define CONFIG_HWDOM_MAX_ORDER 10 + +#define OPT_CONSOLE_STR "dtuart" +#define INVALID_VCPU_ID MAX_VIRT_CPUS + +/* Linkage for RISCV */ +#ifdef __ASSEMBLY__ +#define ALIGN .align 2 + +#define ENTRY(name) \ + .globl name; \ + ALIGN; \ + name: +#endif + +#endif /* __RISCV_CONFIG_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile index 669e16e726..8db4cb98ed 100644 --- a/xen/arch/x86/Makefile +++ b/xen/arch/x86/Makefile @@ -273,11 +273,11 @@ efi/buildid.o efi/relocs-dummy.o: $(BASEDIR)/arch/x86/efi/built_in.o efi/buildid.o efi/relocs-dummy.o: ; .PHONY: include -include: $(BASEDIR)/include/asm-x86/asm-macros.h +include: $(BASEDIR)/arch/x86/include/asm/asm-macros.h asm-macros.i: CFLAGS-y += -D__ASSEMBLY__ -P -$(BASEDIR)/include/asm-x86/asm-macros.h: asm-macros.i Makefile +$(BASEDIR)/arch/x86/include/asm/asm-macros.h: asm-macros.i Makefile echo '#if 0' >$@.new echo '.if 0' >>$@.new echo '#endif' >>$@.new @@ -304,7 +304,7 @@ efi/mkreloc: efi/mkreloc.c .PHONY: clean clean:: rm -f *.lds *.new boot/*.o boot/*~ boot/core boot/mkelf32 - rm -f asm-macros.i $(BASEDIR)/include/asm-x86/asm-macros.* + rm -f asm-macros.i $(BASEDIR)/arch/x86/include/asm/asm-macros.* rm -f $(BASEDIR)/.xen-syms.[0-9]* boot/.*.d $(BASEDIR)/.xen.elf32 rm -f $(BASEDIR)/.xen.efi.[0-9]* efi/*.efi efi/mkreloc rm -f boot/cmdline.S boot/reloc.S boot/*.lnk boot/*.bin diff --git a/xen/arch/x86/arch.mk b/xen/arch/x86/arch.mk index ce0c1a0e7f..eea320e618 100644 --- a/xen/arch/x86/arch.mk +++ b/xen/arch/x86/arch.mk @@ -4,8 +4,9 @@ export XEN_IMG_OFFSET := 0x200000 CFLAGS += -I$(BASEDIR)/include -CFLAGS += -I$(BASEDIR)/include/asm-x86/mach-generic -CFLAGS += -I$(BASEDIR)/include/asm-x86/mach-default +CFLAGS += -I$(BASEDIR)/arch/$(TARGET_ARCH)/include +CFLAGS += -I$(BASEDIR)/arch/x86/include/asm/mach-generic +CFLAGS += -I$(BASEDIR)/arch/x86/include/asm/mach-default CFLAGS += -DXEN_IMG_OFFSET=$(XEN_IMG_OFFSET) # Prevent floating-point variables from creeping into Xen. 
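To make the byte-order arithmetic in the new RISC-V config.h above concrete: LONG_BYTEORDER == 3 gives BYTES_PER_LONG == 1 << 3 == 8 and BITS_PER_LONG == 8 << 3 == 64, as expected for RV64. A hedged compile-time check (Xen's BUILD_BUG_ON assumed):

    static inline void check_riscv64_config(void)
    {
        BUILD_BUG_ON(sizeof(long) != BYTES_PER_LONG);    /* 8 */
        BUILD_BUG_ON(sizeof(long) * 8 != BITS_PER_LONG); /* 64 */
        BUILD_BUG_ON(sizeof(void *) != POINTER_ALIGN);
    }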
diff --git a/xen/arch/x86/include/asm/acpi.h b/xen/arch/x86/include/asm/acpi.h new file mode 100644 index 0000000000..9a9cc4c240 --- /dev/null +++ b/xen/arch/x86/include/asm/acpi.h @@ -0,0 +1,162 @@ +#ifndef _ASM_X86_ACPI_H +#define _ASM_X86_ACPI_H + +/* + * Copyright (C) 2001 Paul Diefenbaugh + * Copyright (C) 2001 Patrick Mochel + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include + +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_FLUSH_CPU_CACHE() wbinvd() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + asm("divl %2;" \ + :"=a"(q32), "=d"(r32) \ + :"r"(d32), \ + "0"(n_lo), "1"(n_hi)) + + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + asm("shrl $1,%2 ;" \ + "rcrl $1,%3;" \ + :"=r"(n_hi), "=r"(n_lo) \ + :"0"(n_hi), "1"(n_lo)) + +extern bool acpi_lapic, acpi_ioapic, acpi_noirq; +extern bool acpi_force, acpi_ht, acpi_disabled; +extern u32 acpi_smi_cmd; +extern u8 acpi_enable_value, acpi_disable_value; +void acpi_pic_sci_set_trigger(unsigned int, u16); + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_noirq = 1; +} + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern int acpi_save_state_disk(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +#define ARCH_HAS_POWER_INIT 1 + +extern s8 acpi_numa; +extern int acpi_scan_nodes(u64 start, u64 end); +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) + +extern struct acpi_sleep_info acpi_sinfo; +#define acpi_video_flags bootsym(video_flags) +struct xenpf_enter_acpi_sleep; +extern int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep); +extern int acpi_enter_state(u32 state); + +struct acpi_sleep_info { + struct 
acpi_generic_address pm1a_cnt_blk; + struct acpi_generic_address pm1b_cnt_blk; + struct acpi_generic_address pm1a_evt_blk; + struct acpi_generic_address pm1b_evt_blk; + struct acpi_generic_address sleep_control; + struct acpi_generic_address sleep_status; + union { + uint16_t pm1a_cnt_val; + uint8_t sleep_type_a; + }; + union { + uint16_t pm1b_cnt_val; + uint8_t sleep_type_b; + }; + uint32_t sleep_state; + uint64_t wakeup_vector; + uint32_t vector_width; + bool_t sleep_extended; +}; + +#define MAX_MADT_ENTRIES MAX(256, 2 * NR_CPUS) +extern u32 x86_acpiid_to_apicid[]; +#define MAX_LOCAL_APIC MAX(256, 4 * NR_CPUS) + +#define INVALID_ACPIID (-1U) + +extern u32 pmtmr_ioport; +extern unsigned int pmtmr_width; + +void acpi_iommu_init(void); +int acpi_dmar_init(void); +int acpi_ivrs_init(void); + +void acpi_mmcfg_init(void); + +/* Incremented whenever we transition through S3. Value is 1 during boot. */ +extern uint32_t system_reset_counter; + +void hvm_acpi_power_button(struct domain *d); +void hvm_acpi_sleep_button(struct domain *d); + +/* suspend/resume */ +void save_rest_processor_state(void); +void restore_rest_processor_state(void); + +#define ACPI_MAP_MEM_ATTR PAGE_HYPERVISOR_UCMINUS + +#endif /*__X86_ASM_ACPI_H*/ diff --git a/xen/arch/x86/include/asm/alternative-asm.h b/xen/arch/x86/include/asm/alternative-asm.h new file mode 100644 index 0000000000..e6c42d721d --- /dev/null +++ b/xen/arch/x86/include/asm/alternative-asm.h @@ -0,0 +1,125 @@ +#ifndef _ASM_X86_ALTERNATIVE_ASM_H_ +#define _ASM_X86_ALTERNATIVE_ASM_H_ + +#include + +#ifdef __ASSEMBLY__ + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro altinstruction_entry orig repl feature orig_len repl_len pad_len + .long \orig - . + .long \repl - . + .word \feature + .byte \orig_len + .byte \repl_len + .byte \pad_len + .byte 0 /* priv */ +.endm + +.macro mknops nr_bytes +#ifdef HAVE_AS_NOPS_DIRECTIVE + .nops \nr_bytes, ASM_NOP_MAX +#else + .skip \nr_bytes, 0x90 +#endif +.endm + +/* GAS's idea of true is -1, while Clang's idea is 1. 
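+ * as_true() below folds that difference away, yielding 1 for a true
+ * expression under either assembler; decl_orig()'s
+ * mknops (as_true(.L\@_diff > 0) * .L\@_diff) therefore emits .L\@_diff
+ * padding bytes when the difference is positive and none otherwise.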
*/ +#ifdef HAVE_AS_NEGATIVE_TRUE +# define as_true(x) (-(x)) +#else +# define as_true(x) (x) +#endif + +#define decl_orig(insn, padding) \ + .L\@_orig_s: insn; .L\@_orig_e: \ + .L\@_diff = padding; \ + mknops (as_true(.L\@_diff > 0) * .L\@_diff); \ + .L\@_orig_p: + +#define orig_len (.L\@_orig_e - .L\@_orig_s) +#define pad_len (.L\@_orig_p - .L\@_orig_e) +#define total_len (.L\@_orig_p - .L\@_orig_s) + +#define decl_repl(insn, nr) .L\@_repl_s\()nr: insn; .L\@_repl_e\()nr: +#define repl_len(nr) (.L\@_repl_e\()nr - .L\@_repl_s\()nr) + +#define as_max(a, b) ((a) ^ (((a) ^ (b)) & -as_true((a) < (b)))) + +.macro ALTERNATIVE oldinstr, newinstr, feature + decl_orig(\oldinstr, repl_len(1) - orig_len) + + .pushsection .altinstructions, "a", @progbits + altinstruction_entry .L\@_orig_s, .L\@_repl_s1, \feature, \ + orig_len, repl_len(1), pad_len + + .section .discard, "a", @progbits + /* + * Assembler-time checks: + * - total_len <= 255 + * - \newinstr <= total_len + */ + .byte total_len + .byte 0xff + repl_len(1) - total_len + + .section .altinstr_replacement, "ax", @progbits + + decl_repl(\newinstr, 1) + + .popsection +.endm + +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + decl_orig(\oldinstr, as_max(repl_len(1), repl_len(2)) - orig_len) + + .pushsection .altinstructions, "a", @progbits + + altinstruction_entry .L\@_orig_s, .L\@_repl_s1, \feature1, \ + orig_len, repl_len(1), pad_len + altinstruction_entry .L\@_orig_s, .L\@_repl_s2, \feature2, \ + orig_len, repl_len(2), pad_len + + .section .discard, "a", @progbits + /* + * Assembler-time checks: + * - total_len <= 255 + * - \newinstr* <= total_len + */ + .byte total_len + .byte 0xff + repl_len(1) - total_len + .byte 0xff + repl_len(2) - total_len + + .section .altinstr_replacement, "ax", @progbits + + decl_repl(\newinstr1, 1) + decl_repl(\newinstr2, 2) + + .popsection +.endm + +#undef as_max +#undef repl_len +#undef decl_repl +#undef total_len +#undef pad_len +#undef orig_len +#undef decl_orig +#undef as_true + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_ALTERNATIVE_ASM_H_ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/alternative.h b/xen/arch/x86/include/asm/alternative.h new file mode 100644 index 0000000000..a7a82c2c03 --- /dev/null +++ b/xen/arch/x86/include/asm/alternative.h @@ -0,0 +1,387 @@ +#ifndef __X86_ALTERNATIVE_H__ +#define __X86_ALTERNATIVE_H__ + +#ifdef __ASSEMBLY__ +#include +#else +#include +#include +#include + +struct __packed alt_instr { + int32_t orig_offset; /* original instruction */ + int32_t repl_offset; /* offset to replacement instruction */ + uint16_t cpuid; /* cpuid bit set for replacement */ + uint8_t orig_len; /* length of original instruction */ + uint8_t repl_len; /* length of new instruction */ + uint8_t pad_len; /* length of build-time padding */ + uint8_t priv; /* Private, for use by apply_alternatives() */ +}; + +#define __ALT_PTR(a,f) ((uint8_t *)((void *)&(a)->f + (a)->f)) +#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) +#define ALT_REPL_PTR(a) __ALT_PTR(a, repl_offset) + +extern void add_nops(void *insns, unsigned int len); +/* Similar to alternative_instructions except it can be run with IRQs enabled. 
+ */
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern void alternative_instructions(void);
+extern void alternative_branches(void);
+
+#define alt_orig_len "(.LXEN%=_orig_e - .LXEN%=_orig_s)"
+#define alt_pad_len "(.LXEN%=_orig_p - .LXEN%=_orig_e)"
+#define alt_total_len "(.LXEN%=_orig_p - .LXEN%=_orig_s)"
+#define alt_repl_s(num) ".LXEN%=_repl_s"#num
+#define alt_repl_e(num) ".LXEN%=_repl_e"#num
+#define alt_repl_len(num) "(" alt_repl_e(num) " - " alt_repl_s(num) ")"
+
+/* GAS's idea of true is -1, while Clang's idea is 1. */
+#ifdef HAVE_AS_NEGATIVE_TRUE
+# define AS_TRUE "-"
+#else
+# define AS_TRUE ""
+#endif
+
+#define as_max(a, b) "(("a") ^ ((("a") ^ ("b")) & -("AS_TRUE"(("a") < ("b")))))"
+
+#define OLDINSTR(oldinstr, padding) \
+    ".LXEN%=_orig_s:\n\t" oldinstr "\n .LXEN%=_orig_e:\n\t" \
+    ".LXEN%=_diff = " padding "\n\t" \
+    "mknops ("AS_TRUE"(.LXEN%=_diff > 0) * .LXEN%=_diff)\n\t" \
+    ".LXEN%=_orig_p:\n\t"
+
+#define OLDINSTR_1(oldinstr, n1) \
+    OLDINSTR(oldinstr, alt_repl_len(n1) "-" alt_orig_len)
+
+#define OLDINSTR_2(oldinstr, n1, n2) \
+    OLDINSTR(oldinstr, \
+             as_max(alt_repl_len(n1), \
+                    alt_repl_len(n2)) "-" alt_orig_len)
+
+#define ALTINSTR_ENTRY(feature, num) \
+    " .long .LXEN%=_orig_s - .\n" /* label */ \
+    " .long " alt_repl_s(num)" - .\n" /* new instruction */ \
+    " .word " __stringify(feature) "\n" /* feature bit */ \
+    " .byte " alt_orig_len "\n" /* source len */ \
+    " .byte " alt_repl_len(num) "\n" /* replacement len */ \
+    " .byte " alt_pad_len "\n" /* padding len */ \
+    " .byte 0\n" /* priv */
+
+#define DISCARD_ENTRY(num) /* repl <= total */ \
+    " .byte 0xff + (" alt_repl_len(num) ") - (" alt_total_len ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, num) /* replacement */ \
+    alt_repl_s(num)":\n\t" newinstr "\n" alt_repl_e(num) ":\n\t"
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature) \
+    OLDINSTR_1(oldinstr, 1) \
+    ".pushsection .altinstructions, \"a\", @progbits\n" \
+    ALTINSTR_ENTRY(feature, 1) \
+    ".section .discard, \"a\", @progbits\n" \
+    ".byte " alt_total_len "\n" /* total_len <= 255 */ \
+    DISCARD_ENTRY(1) \
+    ".section .altinstr_replacement, \"ax\", @progbits\n" \
+    ALTINSTR_REPLACEMENT(newinstr, 1) \
+    ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
+    OLDINSTR_2(oldinstr, 1, 2) \
+    ".pushsection .altinstructions, \"a\", @progbits\n" \
+    ALTINSTR_ENTRY(feature1, 1) \
+    ALTINSTR_ENTRY(feature2, 2) \
+    ".section .discard, \"a\", @progbits\n" \
+    ".byte " alt_total_len "\n" /* total_len <= 255 */ \
+    DISCARD_ENTRY(1) \
+    DISCARD_ENTRY(2) \
+    ".section .altinstr_replacement, \"ax\", @progbits\n" \
+    ALTINSTR_REPLACEMENT(newinstr1, 1) \
+    ALTINSTR_REPLACEMENT(newinstr2, 2) \
+    ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * If oldinstr is shorter than newinstr, it is padded with nops at build
+ * time as needed.
+ *
+ * For non-barrier-like inlines, please define new variants
+ * without volatile and the memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+    asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+
+#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
+    asm volatile (ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
+                                newinstr2, feature2) \
+                  : : : "memory")
+
+/*
+ * Alternative inline assembly with input.
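+ *
+ * A hypothetical sketch (the instruction choice is illustrative only;
+ * eax and edx are assumed local variables), using fixed register
+ * constraints in line with the notes below to load XCR0 only on
+ * XSAVE-capable hardware:
+ *
+ *   alternative_input("", "xsetbv", X86_FEATURE_XSAVE,
+ *                     "a" (eax), "d" (edx), "c" (0));
+ *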
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+    asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
+                  : : input)
+
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+    asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
+                  : output : input)
+
+/*
+ * This is similar to alternative_io, but it has two features and
+ * respective instructions.
+ *
+ * If the CPU has feature2, newinstr2 is used.
+ * Otherwise, if the CPU has feature1, newinstr1 is used.
+ * Otherwise, oldinstr is used.
+ */
+#define alternative_io_2(oldinstr, newinstr1, feature1, newinstr2, \
+                         feature2, output, input...) \
+    asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
+                               newinstr2, feature2) \
+                 : output : input)
+
+/* Use this macro if you need more than one output parameter. */
+#define ASM_OUTPUT2(a...) a
+
+/*
+ * Machinery to allow converting indirect to direct calls, when the called
+ * function is determined once at boot and later never changed.
+ */
+
+#define ALT_CALL_arg1 "rdi"
+#define ALT_CALL_arg2 "rsi"
+#define ALT_CALL_arg3 "rdx"
+#define ALT_CALL_arg4 "rcx"
+#define ALT_CALL_arg5 "r8"
+#define ALT_CALL_arg6 "r9"
+
+#define ALT_CALL_ARG(arg, n) \
+    register typeof(arg) a ## n ## _ asm ( ALT_CALL_arg ## n ) = \
+        ({ BUILD_BUG_ON(sizeof(arg) > sizeof(void *)); (arg); })
+#define ALT_CALL_NO_ARG(n) \
+    register unsigned long a ## n ## _ asm ( ALT_CALL_arg ## n )
+
+#define ALT_CALL_NO_ARG6 ALT_CALL_NO_ARG(6)
+#define ALT_CALL_NO_ARG5 ALT_CALL_NO_ARG(5); ALT_CALL_NO_ARG6
+#define ALT_CALL_NO_ARG4 ALT_CALL_NO_ARG(4); ALT_CALL_NO_ARG5
+#define ALT_CALL_NO_ARG3 ALT_CALL_NO_ARG(3); ALT_CALL_NO_ARG4
+#define ALT_CALL_NO_ARG2 ALT_CALL_NO_ARG(2); ALT_CALL_NO_ARG3
+#define ALT_CALL_NO_ARG1 ALT_CALL_NO_ARG(1); ALT_CALL_NO_ARG2
+
+/*
+ * Unfortunately ALT_CALL_NO_ARG() above can't use a fake initializer (to
+ * suppress "uninitialized variable" warnings), as various versions of gcc
+ * older than 8.1 fall on the nose in various ways with that (always because
+ * of some other construct elsewhere in the same function needing to use the
+ * same hard register). Otherwise the asm() below could uniformly use "+r"
+ * output constraints, making unnecessary all these ALT_CALL_OUT macros.
+ */ +#define ALT_CALL0_OUT "=r" (a1_), "=r" (a2_), "=r" (a3_), \ + "=r" (a4_), "=r" (a5_), "=r" (a6_) +#define ALT_CALL1_OUT "+r" (a1_), "=r" (a2_), "=r" (a3_), \ + "=r" (a4_), "=r" (a5_), "=r" (a6_) +#define ALT_CALL2_OUT "+r" (a1_), "+r" (a2_), "=r" (a3_), \ + "=r" (a4_), "=r" (a5_), "=r" (a6_) +#define ALT_CALL3_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ + "=r" (a4_), "=r" (a5_), "=r" (a6_) +#define ALT_CALL4_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ + "+r" (a4_), "=r" (a5_), "=r" (a6_) +#define ALT_CALL5_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ + "+r" (a4_), "+r" (a5_), "=r" (a6_) +#define ALT_CALL6_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ + "+r" (a4_), "+r" (a5_), "+r" (a6_) + +#define alternative_callN(n, rettype, func) ({ \ + rettype ret_; \ + register unsigned long r10_ asm("r10"); \ + register unsigned long r11_ asm("r11"); \ + asm volatile (ALTERNATIVE("call *%c[addr](%%rip)", "call .", \ + X86_FEATURE_ALWAYS) \ + : ALT_CALL ## n ## _OUT, "=a" (ret_), \ + "=r" (r10_), "=r" (r11_) ASM_CALL_CONSTRAINT \ + : [addr] "i" (&(func)), "g" (func) \ + : "memory" ); \ + ret_; \ +}) + +#define alternative_vcall0(func) ({ \ + ALT_CALL_NO_ARG1; \ + (void)sizeof(func()); \ + (void)alternative_callN(0, int, func); \ +}) + +#define alternative_call0(func) ({ \ + ALT_CALL_NO_ARG1; \ + alternative_callN(0, typeof(func()), func); \ +}) + +#define alternative_vcall1(func, arg) ({ \ + ALT_CALL_ARG(arg, 1); \ + ALT_CALL_NO_ARG2; \ + (void)sizeof(func(arg)); \ + (void)alternative_callN(1, int, func); \ +}) + +#define alternative_call1(func, arg) ({ \ + ALT_CALL_ARG(arg, 1); \ + ALT_CALL_NO_ARG2; \ + alternative_callN(1, typeof(func(arg)), func); \ +}) + +#define alternative_vcall2(func, arg1, arg2) ({ \ + typeof(arg2) v2_ = (arg2); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_NO_ARG3; \ + (void)sizeof(func(arg1, arg2)); \ + (void)alternative_callN(2, int, func); \ +}) + +#define alternative_call2(func, arg1, arg2) ({ \ + typeof(arg2) v2_ = (arg2); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_NO_ARG3; \ + alternative_callN(2, typeof(func(arg1, arg2)), func); \ +}) + +#define alternative_vcall3(func, arg1, arg2, arg3) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_NO_ARG4; \ + (void)sizeof(func(arg1, arg2, arg3)); \ + (void)alternative_callN(3, int, func); \ +}) + +#define alternative_call3(func, arg1, arg2, arg3) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_NO_ARG4; \ + alternative_callN(3, typeof(func(arg1, arg2, arg3)), \ + func); \ +}) + +#define alternative_vcall4(func, arg1, arg2, arg3, arg4) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_NO_ARG5; \ + (void)sizeof(func(arg1, arg2, arg3, arg4)); \ + (void)alternative_callN(4, int, func); \ +}) + +#define alternative_call4(func, arg1, arg2, arg3, arg4) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_NO_ARG5; \ + alternative_callN(4, typeof(func(arg1, arg2, \ + arg3, arg4)), \ + func); \ +}) + +#define alternative_vcall5(func, arg1, arg2, arg3, arg4, arg5) ({ \ + 
typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + typeof(arg5) v5_ = (arg5); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_ARG(v5_, 5); \ + ALT_CALL_NO_ARG6; \ + (void)sizeof(func(arg1, arg2, arg3, arg4, arg5)); \ + (void)alternative_callN(5, int, func); \ +}) + +#define alternative_call5(func, arg1, arg2, arg3, arg4, arg5) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + typeof(arg5) v5_ = (arg5); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_ARG(v5_, 5); \ + ALT_CALL_NO_ARG6; \ + alternative_callN(5, typeof(func(arg1, arg2, arg3, \ + arg4, arg5)), \ + func); \ +}) + +#define alternative_vcall6(func, arg1, arg2, arg3, arg4, arg5, arg6) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + typeof(arg5) v5_ = (arg5); \ + typeof(arg6) v6_ = (arg6); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_ARG(v5_, 5); \ + ALT_CALL_ARG(v6_, 6); \ + (void)sizeof(func(arg1, arg2, arg3, arg4, arg5, arg6)); \ + (void)alternative_callN(6, int, func); \ +}) + +#define alternative_call6(func, arg1, arg2, arg3, arg4, arg5, arg6) ({ \ + typeof(arg2) v2_ = (arg2); \ + typeof(arg3) v3_ = (arg3); \ + typeof(arg4) v4_ = (arg4); \ + typeof(arg5) v5_ = (arg5); \ + typeof(arg6) v6_ = (arg6); \ + ALT_CALL_ARG(arg1, 1); \ + ALT_CALL_ARG(v2_, 2); \ + ALT_CALL_ARG(v3_, 3); \ + ALT_CALL_ARG(v4_, 4); \ + ALT_CALL_ARG(v5_, 5); \ + ALT_CALL_ARG(v6_, 6); \ + alternative_callN(6, typeof(func(arg1, arg2, arg3, \ + arg4, arg5, arg6)), \ + func); \ +}) + +#define alternative_vcall__(nr) alternative_vcall ## nr +#define alternative_call__(nr) alternative_call ## nr + +#define alternative_vcall_(nr) alternative_vcall__(nr) +#define alternative_call_(nr) alternative_call__(nr) + +#define alternative_vcall(func, args...) \ + alternative_vcall_(count_args(args))(func, ## args) + +#define alternative_call(func, args...) \ + alternative_call_(count_args(args))(func, ## args) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __X86_ALTERNATIVE_H__ */ diff --git a/xen/arch/x86/include/asm/altp2m.h b/xen/arch/x86/include/asm/altp2m.h new file mode 100644 index 0000000000..b206e95863 --- /dev/null +++ b/xen/arch/x86/include/asm/altp2m.h @@ -0,0 +1,57 @@ +/* + * Alternate p2m HVM + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+ */ + +#ifndef __ASM_X86_ALTP2M_H +#define __ASM_X86_ALTP2M_H + +#ifdef CONFIG_HVM + +#include +#include /* for struct vcpu, struct domain */ +#include /* for vcpu_altp2m */ + +/* Alternate p2m HVM on/off per domain */ +static inline bool altp2m_active(const struct domain *d) +{ + return d->arch.altp2m_active; +} + +/* Alternate p2m VCPU */ +void altp2m_vcpu_initialise(struct vcpu *v); +void altp2m_vcpu_destroy(struct vcpu *v); + +int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn); +void altp2m_vcpu_disable_ve(struct vcpu *v); + +static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v) +{ + return vcpu_altp2m(v).p2midx; +} +#else + +static inline bool altp2m_active(const struct domain *d) +{ + return false; +} + +/* Only declaration is needed. DCE will optimise it out when linking. */ +uint16_t altp2m_vcpu_idx(const struct vcpu *v); +void altp2m_vcpu_disable_ve(struct vcpu *v); + +#endif + +#endif /* __ASM_X86_ALTP2M_H */ diff --git a/xen/arch/x86/include/asm/amd.h b/xen/arch/x86/include/asm/amd.h new file mode 100644 index 0000000000..a82382e6bf --- /dev/null +++ b/xen/arch/x86/include/asm/amd.h @@ -0,0 +1,154 @@ +/* + * amd.h - AMD processor specific definitions + */ + +#ifndef __AMD_H__ +#define __AMD_H__ + +#include + +/* CPUID masked for use by AMD-V Extended Migration */ + +/* Family 0Fh, Revision C */ +#define AMD_FEATURES_K8_REV_C_ECX 0 +#define AMD_FEATURES_K8_REV_C_EDX ( \ + cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \ + cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \ + cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \ + cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \ + cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \ + cpufeat_mask(X86_FEATURE_SEP) | cpufeat_mask(X86_FEATURE_MTRR) | \ + cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \ + cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \ + cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_CLFLUSH)| \ + cpufeat_mask(X86_FEATURE_MMX) | cpufeat_mask(X86_FEATURE_FXSR) | \ + cpufeat_mask(X86_FEATURE_SSE) | cpufeat_mask(X86_FEATURE_SSE2)) +#define AMD_EXTFEATURES_K8_REV_C_ECX 0 +#define AMD_EXTFEATURES_K8_REV_C_EDX ( \ + cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \ + cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \ + cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \ + cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \ + cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \ + cpufeat_mask(X86_FEATURE_SYSCALL) | cpufeat_mask(X86_FEATURE_MTRR) | \ + cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \ + cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \ + cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_NX) | \ + cpufeat_mask(X86_FEATURE_MMXEXT) | cpufeat_mask(X86_FEATURE_MMX) | \ + cpufeat_mask(X86_FEATURE_FXSR) | cpufeat_mask(X86_FEATURE_LM) | \ + cpufeat_mask(X86_FEATURE_3DNOWEXT) | cpufeat_mask(X86_FEATURE_3DNOW)) + +/* Family 0Fh, Revision D */ +#define AMD_FEATURES_K8_REV_D_ECX AMD_FEATURES_K8_REV_C_ECX +#define AMD_FEATURES_K8_REV_D_EDX AMD_FEATURES_K8_REV_C_EDX +#define AMD_EXTFEATURES_K8_REV_D_ECX (AMD_EXTFEATURES_K8_REV_C_ECX |\ + cpufeat_mask(X86_FEATURE_LAHF_LM)) +#define AMD_EXTFEATURES_K8_REV_D_EDX (AMD_EXTFEATURES_K8_REV_C_EDX |\ + cpufeat_mask(X86_FEATURE_FFXSR)) + +/* Family 0Fh, Revision E */ +#define AMD_FEATURES_K8_REV_E_ECX (AMD_FEATURES_K8_REV_D_ECX | \ + 
cpufeat_mask(X86_FEATURE_SSE3)) +#define AMD_FEATURES_K8_REV_E_EDX (AMD_FEATURES_K8_REV_D_EDX | \ + cpufeat_mask(X86_FEATURE_HTT)) +#define AMD_EXTFEATURES_K8_REV_E_ECX (AMD_EXTFEATURES_K8_REV_D_ECX |\ + cpufeat_mask(X86_FEATURE_CMP_LEGACY)) +#define AMD_EXTFEATURES_K8_REV_E_EDX AMD_EXTFEATURES_K8_REV_D_EDX + +/* Family 0Fh, Revision F */ +#define AMD_FEATURES_K8_REV_F_ECX (AMD_FEATURES_K8_REV_E_ECX | \ + cpufeat_mask(X86_FEATURE_CX16)) +#define AMD_FEATURES_K8_REV_F_EDX AMD_FEATURES_K8_REV_E_EDX +#define AMD_EXTFEATURES_K8_REV_F_ECX (AMD_EXTFEATURES_K8_REV_E_ECX |\ + cpufeat_mask(X86_FEATURE_SVM) | cpufeat_mask(X86_FEATURE_EXTAPIC) | \ + cpufeat_mask(X86_FEATURE_CR8_LEGACY)) +#define AMD_EXTFEATURES_K8_REV_F_EDX (AMD_EXTFEATURES_K8_REV_E_EDX |\ + cpufeat_mask(X86_FEATURE_RDTSCP)) + +/* Family 0Fh, Revision G */ +#define AMD_FEATURES_K8_REV_G_ECX AMD_FEATURES_K8_REV_F_ECX +#define AMD_FEATURES_K8_REV_G_EDX AMD_FEATURES_K8_REV_F_EDX +#define AMD_EXTFEATURES_K8_REV_G_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\ + cpufeat_mask(X86_FEATURE_3DNOWPREFETCH)) +#define AMD_EXTFEATURES_K8_REV_G_EDX AMD_EXTFEATURES_K8_REV_F_EDX + +/* Family 10h, Revision B */ +#define AMD_FEATURES_FAM10h_REV_B_ECX (AMD_FEATURES_K8_REV_F_ECX | \ + cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MONITOR)) +#define AMD_FEATURES_FAM10h_REV_B_EDX AMD_FEATURES_K8_REV_F_EDX +#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\ + cpufeat_mask(X86_FEATURE_ABM) | cpufeat_mask(X86_FEATURE_SSE4A) | \ + cpufeat_mask(X86_FEATURE_MISALIGNSSE) | cpufeat_mask(X86_FEATURE_OSVW) |\ + cpufeat_mask(X86_FEATURE_IBS)) +#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\ + cpufeat_mask(X86_FEATURE_PAGE1GB)) + +/* Family 10h, Revision C */ +#define AMD_FEATURES_FAM10h_REV_C_ECX AMD_FEATURES_FAM10h_REV_B_ECX +#define AMD_FEATURES_FAM10h_REV_C_EDX AMD_FEATURES_FAM10h_REV_B_EDX +#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\ + cpufeat_mask(X86_FEATURE_SKINIT) | cpufeat_mask(X86_FEATURE_WDT)) +#define AMD_EXTFEATURES_FAM10h_REV_C_EDX AMD_EXTFEATURES_FAM10h_REV_B_EDX + +/* Family 11h, Revision B */ +#define AMD_FEATURES_FAM11h_REV_B_ECX AMD_FEATURES_K8_REV_G_ECX +#define AMD_FEATURES_FAM11h_REV_B_EDX AMD_FEATURES_K8_REV_G_EDX +#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\ + cpufeat_mask(X86_FEATURE_SKINIT)) +#define AMD_EXTFEATURES_FAM11h_REV_B_EDX AMD_EXTFEATURES_K8_REV_G_EDX + +/* AMD errata checking + * + * Errata are defined using the AMD_LEGACY_ERRATUM() or AMD_OSVW_ERRATUM() + * macros. The latter is intended for newer errata that have an OSVW id + * assigned, which it takes as first argument. Both take a variable number + * of family-specific model-stepping ranges created by AMD_MODEL_RANGE(). + * + * Example 1: + * #define AMD_ERRATUM_319 \ + * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), \ + * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), \ + * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)) + * Example 2: + * #define AMD_ERRATUM_400 \ + * AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), \ + * AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)) + * + */ + +#define AMD_LEGACY_ERRATUM(...) -1 /* legacy */, __VA_ARGS__, 0 +#define AMD_OSVW_ERRATUM(osvw_id, ...) 
osvw_id, __VA_ARGS__, 0 +#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ + ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) + +#define AMD_ERRATUM_121 \ + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x3f, 0xf)) + +#define AMD_ERRATUM_170 \ + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x67, 0xf)) + +#define AMD_ERRATUM_383 \ + AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf), \ + AMD_MODEL_RANGE(0x12, 0x0, 0x0, 0x1, 0x0)) + +#define AMD_ERRATUM_573 \ + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0xff, 0xf), \ + AMD_MODEL_RANGE(0x10, 0x0, 0x0, 0xff, 0xf), \ + AMD_MODEL_RANGE(0x11, 0x0, 0x0, 0xff, 0xf), \ + AMD_MODEL_RANGE(0x12, 0x0, 0x0, 0xff, 0xf)) + +struct cpuinfo_x86; +int cpu_has_amd_erratum(const struct cpuinfo_x86 *, int, ...); + +extern s8 opt_allow_unsafe; + +void fam10h_check_enable_mmcfg(void); +void check_enable_amd_mmconf_dmi(void); + +extern bool amd_acpi_c1e_quirk; +void amd_check_disable_c1e(unsigned int port, u8 value); + +#endif /* __AMD_H__ */ diff --git a/xen/arch/x86/include/asm/apic.h b/xen/arch/x86/include/asm/apic.h new file mode 100644 index 0000000000..2fe54bbf1c --- /dev/null +++ b/xen/arch/x86/include/asm/apic.h @@ -0,0 +1,202 @@ +#ifndef __ASM_APIC_H +#define __ASM_APIC_H + +#include +#include +#include + +#define Dprintk(x...) do {} while (0) + +/* + * Debugging macros + */ +#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) + +/* Possible APIC states */ +enum apic_mode { + APIC_MODE_INVALID, /* Not set yet */ + APIC_MODE_DISABLED, /* If uniprocessor, or MP in uniprocessor mode */ + APIC_MODE_XAPIC, /* xAPIC mode - default upon chipset reset */ + APIC_MODE_X2APIC /* x2APIC mode - common for large MP machines */ +}; + +extern bool iommu_x2apic_enabled; +extern u8 apic_verbosity; +extern bool directed_eoi_enabled; + +void check_x2apic_preenabled(void); +void x2apic_bsp_setup(void); +void x2apic_ap_setup(void); +const struct genapic *apic_x2apic_probe(void); + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + + +/* + * Basic functions accessing APICs. + */ + +static __inline void apic_mem_write(unsigned long reg, u32 v) +{ + *((volatile u32 *)(APIC_BASE+reg)) = v; +} + +static __inline void apic_mem_write_atomic(unsigned long reg, u32 v) +{ + (void)xchg((volatile u32 *)(APIC_BASE+reg), v); +} + +static __inline u32 apic_mem_read(unsigned long reg) +{ + return *((volatile u32 *)(APIC_BASE+reg)); +} + +/* NOTE: in x2APIC mode, we should use apic_icr_write()/apic_icr_read() to + * access the 64-bit ICR register. 
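+ * In x2APIC mode the ICR is one 64-bit MSR, so a plain 32-bit
+ * apic_write() of APIC_ICR would zero-extend and clobber the
+ * destination field held in the upper half.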
+ */ + +static __inline void apic_wrmsr(unsigned long reg, uint64_t msr_content) +{ + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || + reg == APIC_LVR) + return; + + wrmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); +} + +static __inline uint64_t apic_rdmsr(unsigned long reg) +{ + uint64_t msr_content; + + if (reg == APIC_DFR) + return -1u; + + rdmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); + return msr_content; +} + +static __inline void apic_write(unsigned long reg, u32 v) +{ + + if ( x2apic_enabled ) + apic_wrmsr(reg, v); + else + apic_mem_write(reg, v); +} + +static __inline void apic_write_atomic(unsigned long reg, u32 v) +{ + if ( x2apic_enabled ) + apic_wrmsr(reg, v); + else + apic_mem_write_atomic(reg, v); +} + +static __inline u32 apic_read(unsigned long reg) +{ + if ( x2apic_enabled ) + return apic_rdmsr(reg); + else + return apic_mem_read(reg); +} + +static __inline u64 apic_icr_read(void) +{ + u32 lo, hi; + + if ( x2apic_enabled ) + return apic_rdmsr(APIC_ICR); + else + { + lo = apic_mem_read(APIC_ICR); + hi = apic_mem_read(APIC_ICR2); + } + + return ((u64)lo) | (((u64)hi) << 32); +} + +static __inline void apic_icr_write(u32 low, u32 dest) +{ + if ( x2apic_enabled ) + apic_wrmsr(APIC_ICR, low | ((uint64_t)dest << 32)); + else + { + apic_mem_write(APIC_ICR2, dest << 24); + apic_mem_write(APIC_ICR, low); + } +} + +static __inline bool_t apic_isr_read(u8 vector) +{ + return (apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)) >> + (vector & 0x1f)) & 1; +} + +static __inline u32 get_apic_id(void) /* Get the physical APIC id */ +{ + u32 id = apic_read(APIC_ID); + return x2apic_enabled ? id : GET_xAPIC_ID(id); +} + +void apic_wait_icr_idle(void); + +int get_physical_broadcast(void); + +static inline void ack_APIC_irq(void) +{ + /* Docs say use 0 for future compatibility */ + apic_write(APIC_EOI, 0); +} + +extern int get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC (void); +extern void disconnect_bsp_APIC (int virt_wire_setup); +extern void disable_local_APIC (void); +extern int verify_local_APIC (void); +extern void cache_APIC_registers (void); +extern void sync_Arb_IDs (void); +extern void init_bsp_APIC (void); +extern void setup_local_APIC(bool bsp); +extern void init_apic_mappings (void); +extern void smp_local_timer_interrupt (struct cpu_user_regs *regs); +extern void setup_boot_APIC_clock (void); +extern void setup_secondary_APIC_clock (void); +extern void setup_apic_nmi_watchdog (void); +extern void disable_lapic_nmi_watchdog(void); +extern int reserve_lapic_nmi(void); +extern void release_lapic_nmi(void); +extern void self_nmi(void); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern bool nmi_watchdog_tick(const struct cpu_user_regs *regs); +extern int APIC_init_uniprocessor (void); +extern void disable_APIC_timer(void); +extern void enable_APIC_timer(void); +extern int lapic_suspend(void); +extern int lapic_resume(void); +extern void record_boot_APIC_mode(void); +extern enum apic_mode current_local_apic_mode(void); +extern void check_for_unexpected_msi(unsigned int vector); + +extern void check_nmi_watchdog(void); + +extern unsigned int nmi_watchdog; +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 + +#endif /* __ASM_APIC_H */ diff --git a/xen/arch/x86/include/asm/apicdef.h b/xen/arch/x86/include/asm/apicdef.h new file mode 100644 index 0000000000..0633da9fe1 --- /dev/null +++ b/xen/arch/x86/include/asm/apicdef.h @@ -0,0 +1,134 @@ +#ifndef __ASM_APICDEF_H 
+#define __ASM_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. + * Ingo Molnar , 1999, 2000 + */ + +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +#define APIC_ID 0x20 +#define APIC_ID_MASK (0xFFu<<24) +#define GET_xAPIC_ID(x) (((x)>>24)&0xFFu) +#define SET_xAPIC_ID(x) (((x)<<24)) +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define APIC_LVR_DIRECTED_EOI (1 << 24) +#define GET_APIC_VERSION(x) ((x)&0xFF) +#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFF +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFF +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu<<24) +#define GET_xAPIC_LOGICAL_ID(x) (((x)>>24)&0xFF) +#define SET_xAPIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_ALL_CPUS 0xFF +#define APIC_DFR 0xE0 +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_FOCUS_DISABLED (1<<9) +#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_SPIV_DIRECTED_EOI (1<<12) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_ICR 0x300 +#define APIC_DEST_NOSHORT 0x00000 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_SHORT_MASK 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_MASK 0x00800 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_xAPIC_DEST_FIELD(x) (((x)>>24)&0xFF) +#define SET_xAPIC_DEST_FIELD(x) ((x)<<24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_CMCI 0x2F0 + +#define APIC_TIMER_MODE_MASK (0x3<<17) +#define APIC_TIMER_MODE_ONESHOT (0x0<<17) +#define APIC_TIMER_MODE_PERIODIC (0x1<<17) +#define APIC_TIMER_MODE_TSC_DEADLINE (0x2<<17) +#define APIC_LVT_MASKED (1<<16) +#define APIC_LVT_LEVEL_TRIGGER (1<<15) +#define APIC_LVT_REMOTE_IRR (1<<14) +#define APIC_INPUT_POLARITY (1<<13) +#define APIC_SEND_PENDING (1<<12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) +#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 
0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA + +/* Only available in x2APIC mode */ +#define APIC_SELF_IPI 0x3F0 + +/* Applicable to vectors, TPR, and PPR. */ +#define APIC_PRIO_CLASS(v) ((v) & 0xF0) + +#define APIC_BASE __fix_to_virt(FIX_APIC_BASE) + +/* It's only used in x2APIC mode of an x2APIC unit. */ +#define APIC_MSR_BASE 0x800 + +#define MAX_IO_APICS 128 + +extern bool x2apic_enabled; + +#endif diff --git a/xen/arch/x86/include/asm/asm-defns.h b/xen/arch/x86/include/asm/asm-defns.h new file mode 100644 index 0000000000..505f39ad5f --- /dev/null +++ b/xen/arch/x86/include/asm/asm-defns.h @@ -0,0 +1,78 @@ +#ifndef HAVE_AS_CLAC_STAC +.macro clac + .byte 0x0f, 0x01, 0xca +.endm + +.macro stac + .byte 0x0f, 0x01, 0xcb +.endm +#endif + +.macro vmrun + .byte 0x0f, 0x01, 0xd8 +.endm + +.macro stgi + .byte 0x0f, 0x01, 0xdc +.endm + +.macro clgi + .byte 0x0f, 0x01, 0xdd +.endm + +.macro INDIRECT_BRANCH insn:req arg:req +/* + * Create an indirect branch. insn is one of call/jmp, arg is a single + * register. + * + * With no compiler support, this degrades into a plain indirect call/jmp. + * With compiler support, dispatch to the correct __x86_indirect_thunk_* + */ + .if CONFIG_INDIRECT_THUNK == 1 + + $done = 0 + .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15 + .ifeqs "\arg", "%r\reg" + \insn __x86_indirect_thunk_r\reg + $done = 1 + .exitm + .endif + .endr + + .if $done != 1 + .error "Bad register arg \arg" + .endif + + .else + \insn *\arg + .endif +.endm + +/* Convenience wrappers. */ +.macro INDIRECT_CALL arg:req + INDIRECT_BRANCH call \arg +.endm + +.macro INDIRECT_JMP arg:req + INDIRECT_BRANCH jmp \arg +.endm + +.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req +#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS) + /* + * Here we want + * + * ptr &= ~0ull >> (ptr < HYPERVISOR_VIRT_END); + * + * but guaranteed without any conditional branches (hence in assembly). + */ + mov $(HYPERVISOR_VIRT_END - 1), \scratch1 + mov $~0, \scratch2 + cmp \ptr, \scratch1 + rcr $1, \scratch2 + and \scratch2, \ptr +#elif defined(CONFIG_DEBUG) && defined(CONFIG_PV) + xor $~\@, \scratch1 + xor $~\@, \scratch2 +#endif +.endm diff --git a/xen/arch/x86/include/asm/asm_defns.h b/xen/arch/x86/include/asm/asm_defns.h new file mode 100644 index 0000000000..d9431180cf --- /dev/null +++ b/xen/arch/x86/include/asm/asm_defns.h @@ -0,0 +1,354 @@ + +#ifndef __X86_ASM_DEFNS_H__ +#define __X86_ASM_DEFNS_H__ + +#ifndef COMPILE_OFFSETS +/* NB. Auto-generated from arch/.../asm-offsets.c */ +#include +#endif +#include +#include +#include +#include +#include + +#ifdef __ASSEMBLY__ +#include +#ifndef CONFIG_INDIRECT_THUNK +.equ CONFIG_INDIRECT_THUNK, 0 +#endif +#else +#include +asm ( "\t.equ CONFIG_INDIRECT_THUNK, " + __stringify(IS_ENABLED(CONFIG_INDIRECT_THUNK)) ); +#endif + +#ifndef __ASSEMBLY__ + +/* + * This output constraint should be used for any inline asm which has a "call" + * instruction. Otherwise the asm may be inserted before the frame pointer + * gets set up by the containing function. 
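+ *
+ * A hypothetical use, appended after the regular output operands
+ * ("helper" and "x" are illustrative):
+ *
+ *   asm volatile ( "call helper" : "+m" (x) ASM_CALL_CONSTRAINT );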
+ */ +#ifdef CONFIG_FRAME_POINTER +register unsigned long current_stack_pointer asm("rsp"); +# define ASM_CALL_CONSTRAINT , "+r" (current_stack_pointer) +#else +# define ASM_CALL_CONSTRAINT +#endif + +#endif + +#ifndef NDEBUG +#define ASSERT_INTERRUPT_STATUS(x, msg) \ + pushf; \ + testb $X86_EFLAGS_IF>>8,1(%rsp); \ + j##x 1f; \ + ASSERT_FAILED(msg); \ +1: addq $8,%rsp; +#else +#define ASSERT_INTERRUPT_STATUS(x, msg) +#endif + +#define ASSERT_INTERRUPTS_ENABLED \ + ASSERT_INTERRUPT_STATUS(nz, "INTERRUPTS ENABLED") +#define ASSERT_INTERRUPTS_DISABLED \ + ASSERT_INTERRUPT_STATUS(z, "INTERRUPTS DISABLED") + +#ifdef __ASSEMBLY__ +# define _ASM_EX(p) p-. +#else +# define _ASM_EX(p) #p "-." +#endif + +/* Exception table entry */ +#ifdef __ASSEMBLY__ +# define _ASM__EXTABLE(sfx, from, to) \ + .section .ex_table##sfx, "a" ; \ + .balign 4 ; \ + .long _ASM_EX(from), _ASM_EX(to) ; \ + .previous +#else +# define _ASM__EXTABLE(sfx, from, to) \ + " .section .ex_table" #sfx ",\"a\"\n" \ + " .balign 4\n" \ + " .long " _ASM_EX(from) ", " _ASM_EX(to) "\n" \ + " .previous\n" +#endif + +#define _ASM_EXTABLE(from, to) _ASM__EXTABLE(, from, to) +#define _ASM_PRE_EXTABLE(from, to) _ASM__EXTABLE(.pre, from, to) + +#ifdef __ASSEMBLY__ + +#ifdef HAVE_AS_QUOTED_SYM +#define SUBSECTION_LBL(tag) \ + .ifndef .L.tag; \ + .equ .L.tag, 1; \ + .equ __stringify(__OBJECT_LABEL__.tag), .; \ + .endif +#else +#define SUBSECTION_LBL(tag) \ + .ifndef __OBJECT_LABEL__.tag; \ + __OBJECT_LABEL__.tag:; \ + .endif +#endif + +#define UNLIKELY_START(cond, tag) \ + .Ldispatch.tag: \ + j##cond .Lunlikely.tag; \ + .subsection 1; \ + SUBSECTION_LBL(unlikely); \ + .Lunlikely.tag: + +#define UNLIKELY_DISPATCH_LABEL(tag) \ + .Ldispatch.tag + +#define UNLIKELY_DONE(cond, tag) \ + j##cond .Llikely.tag + +#define __UNLIKELY_END(tag) \ + .subsection 0; \ + .Llikely.tag: + +#define UNLIKELY_END(tag) \ + UNLIKELY_DONE(mp, tag); \ + __UNLIKELY_END(tag) + + .equ .Lrax, 0 + .equ .Lrcx, 1 + .equ .Lrdx, 2 + .equ .Lrbx, 3 + .equ .Lrsp, 4 + .equ .Lrbp, 5 + .equ .Lrsi, 6 + .equ .Lrdi, 7 + .equ .Lr8, 8 + .equ .Lr9, 9 + .equ .Lr10, 10 + .equ .Lr11, 11 + .equ .Lr12, 12 + .equ .Lr13, 13 + .equ .Lr14, 14 + .equ .Lr15, 15 + +#define STACK_CPUINFO_FIELD(field) (1 - CPUINFO_sizeof + CPUINFO_##field) +#define GET_STACK_END(reg) \ + .if .Lr##reg >= 8; \ + movq $STACK_SIZE-1, %r##reg; \ + .else; \ + movl $STACK_SIZE-1, %e##reg; \ + .endif; \ + orq %rsp, %r##reg + +#define GET_CPUINFO_FIELD(field, reg) \ + GET_STACK_END(reg); \ + addq $STACK_CPUINFO_FIELD(field), %r##reg + +#define GET_CURRENT(reg) \ + GET_STACK_END(reg); \ + movq STACK_CPUINFO_FIELD(current_vcpu)(%r##reg), %r##reg + +#ifndef NDEBUG +#define ASSERT_NOT_IN_ATOMIC \ + sti; /* sometimes called with interrupts disabled: safe to enable */ \ + call ASSERT_NOT_IN_ATOMIC +#else +#define ASSERT_NOT_IN_ATOMIC +#endif + +#define CPUINFO_FEATURE_OFFSET(feature) \ + (CPUINFO_features + (cpufeat_word(feature) * 4)) + +#else + +#ifdef HAVE_AS_QUOTED_SYM +#define SUBSECTION_LBL(tag) \ + ".ifndef .L." #tag "\n\t" \ + ".equ .L." #tag ", 1\n\t" \ + ".equ \"" __stringify(__OBJECT_LABEL__) "." #tag "\", .\n\t" \ + ".endif" +#else +#define SUBSECTION_LBL(tag) \ + ".ifndef " __stringify(__OBJECT_LABEL__) "." #tag "\n\t" \ + __stringify(__OBJECT_LABEL__) "." 
#tag ":\n\t" \
+    ".endif"
+#endif
+
+#ifdef __clang__ /* clang's builtin assembler can't do .subsection */
+
+#define UNLIKELY_START_SECTION ".pushsection .text.unlikely,\"ax\""
+#define UNLIKELY_END_SECTION   ".popsection"
+
+#else
+
+#define UNLIKELY_START_SECTION ".subsection 1"
+#define UNLIKELY_END_SECTION   ".subsection 0"
+
+#endif
+
+#define UNLIKELY_START(cond, tag) \
+    "j" #cond " .Lunlikely." #tag ".%=;\n\t" \
+    UNLIKELY_START_SECTION "\n\t" \
+    SUBSECTION_LBL(unlikely) "\n" \
+    ".Lunlikely." #tag ".%=:"
+
+#define UNLIKELY_END(tag) \
+    "jmp .Llikely." #tag ".%=;\n\t" \
+    UNLIKELY_END_SECTION "\n" \
+    ".Llikely." #tag ".%=:"
+
+static always_inline void clac(void)
+{
+    /* Note: a barrier is implicit in alternative() */
+    alternative("", "clac", X86_FEATURE_XEN_SMAP);
+}
+
+static always_inline void stac(void)
+{
+    /* Note: a barrier is implicit in alternative() */
+    alternative("", "stac", X86_FEATURE_XEN_SMAP);
+}
+#endif
+
+#ifdef __ASSEMBLY__
+.macro SAVE_ALL compat=0
+        addq  $-(UREGS_error_code-UREGS_r15), %rsp
+        cld
+        movq  %rdi,UREGS_rdi(%rsp)
+        xor   %edi, %edi
+        movq  %rsi,UREGS_rsi(%rsp)
+        xor   %esi, %esi
+        movq  %rdx,UREGS_rdx(%rsp)
+        xor   %edx, %edx
+        movq  %rcx,UREGS_rcx(%rsp)
+        xor   %ecx, %ecx
+        movq  %rax,UREGS_rax(%rsp)
+        xor   %eax, %eax
+.if !\compat
+        movq  %r8,UREGS_r8(%rsp)
+        movq  %r9,UREGS_r9(%rsp)
+        movq  %r10,UREGS_r10(%rsp)
+        movq  %r11,UREGS_r11(%rsp)
+.endif
+        xor   %r8d, %r8d
+        xor   %r9d, %r9d
+        xor   %r10d, %r10d
+        xor   %r11d, %r11d
+        movq  %rbx,UREGS_rbx(%rsp)
+        xor   %ebx, %ebx
+        movq  %rbp,UREGS_rbp(%rsp)
+#ifdef CONFIG_FRAME_POINTER
+/* Indicate special exception stack frame by inverting the frame pointer. */
+        leaq  UREGS_rbp(%rsp), %rbp
+        notq  %rbp
+#else
+        xor   %ebp, %ebp
+#endif
+.if !\compat
+        movq  %r12,UREGS_r12(%rsp)
+        movq  %r13,UREGS_r13(%rsp)
+        movq  %r14,UREGS_r14(%rsp)
+        movq  %r15,UREGS_r15(%rsp)
+.endif
+        xor   %r12d, %r12d
+        xor   %r13d, %r13d
+        xor   %r14d, %r14d
+        xor   %r15d, %r15d
+.endm
+
+#define LOAD_ONE_REG(reg, compat) \
+.if !(compat); \
+        movq  UREGS_r##reg(%rsp),%r##reg; \
+.else; \
+        movl  UREGS_r##reg(%rsp),%e##reg; \
+.endif
+
+/*
+ * Restore all previously saved registers.
+ *
+ * @adj: extra stack pointer adjustment to be folded into the adjustment done
+ *       anyway at the end of the macro
+ * @compat: R8-R15 don't need reloading, but they are clobbered for added
+ *          safety against information leaks.
+ */ +.macro RESTORE_ALL adj=0 compat=0 +.if !\compat + movq UREGS_r15(%rsp), %r15 + movq UREGS_r14(%rsp), %r14 + movq UREGS_r13(%rsp), %r13 + movq UREGS_r12(%rsp), %r12 +.else + xor %r15d, %r15d + xor %r14d, %r14d + xor %r13d, %r13d + xor %r12d, %r12d +.endif + LOAD_ONE_REG(bp, \compat) + LOAD_ONE_REG(bx, \compat) +.if !\compat + movq UREGS_r11(%rsp),%r11 + movq UREGS_r10(%rsp),%r10 + movq UREGS_r9(%rsp),%r9 + movq UREGS_r8(%rsp),%r8 +.else + xor %r11d, %r11d + xor %r10d, %r10d + xor %r9d, %r9d + xor %r8d, %r8d +.endif + LOAD_ONE_REG(ax, \compat) + LOAD_ONE_REG(cx, \compat) + LOAD_ONE_REG(dx, \compat) + LOAD_ONE_REG(si, \compat) + LOAD_ONE_REG(di, \compat) + subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp +.endm + +#ifdef CONFIG_PV32 +#define CR4_PV32_RESTORE \ + ALTERNATIVE_2 "", \ + "call cr4_pv32_restore", X86_FEATURE_XEN_SMEP, \ + "call cr4_pv32_restore", X86_FEATURE_XEN_SMAP +#else +#define CR4_PV32_RESTORE +#endif + +#include + +#endif + +/* Work around AMD erratum #88 */ +#define safe_swapgs \ + "mfence; swapgs;" + +#ifdef __sun__ +#define REX64_PREFIX "rex64\\" +#elif defined(__clang__) +#define REX64_PREFIX ".byte 0x48; " +#else +#define REX64_PREFIX "rex64/" +#endif + +#define ELFNOTE(name, type, desc) \ + .pushsection .note.name, "a", @note ; \ + .p2align 2 ; \ + .long 2f - 1f /* namesz */ ; \ + .long 4f - 3f /* descsz */ ; \ + .long type /* type */ ; \ +1: .asciz #name /* name */ ; \ +2: .p2align 2 ; \ +3: desc /* desc */ ; \ +4: .p2align 2 ; \ + .popsection + +#define ASM_INT(label, val) \ + .p2align 2; \ +label: .long (val); \ + .size label, . - label; \ + .type label, @object + +#define ASM_CONSTANT(name, value) \ + asm ( ".equ " #name ", %P0; .global " #name \ + :: "i" ((value)) ); +#endif /* __X86_ASM_DEFNS_H__ */ diff --git a/xen/arch/x86/include/asm/atomic.h b/xen/arch/x86/include/asm/atomic.h new file mode 100644 index 0000000000..27aad43aaa --- /dev/null +++ b/xen/arch/x86/include/asm/atomic.h @@ -0,0 +1,239 @@ +#ifndef __ARCH_X86_ATOMIC__ +#define __ARCH_X86_ATOMIC__ + +#include +#include + +#define build_read_atomic(name, size, type, reg) \ +static inline type name(const volatile type *addr) \ +{ \ + type ret; \ + asm volatile ( "mov" size " %1,%0" : reg (ret) : "m" (*addr) ); \ + return ret; \ +} + +#define build_write_atomic(name, size, type, reg) \ +static inline void name(volatile type *addr, type val) \ +{ \ + asm volatile ( "mov" size " %1,%0" : "=m" (*addr) : reg (val) ); \ +} + +#define build_add_sized(name, size, type, reg) \ + static inline void name(volatile type *addr, type val) \ + { \ + asm volatile("add" size " %1,%0" \ + : "=m" (*addr) \ + : reg (val)); \ + } + +build_read_atomic(read_u8_atomic, "b", uint8_t, "=q") +build_read_atomic(read_u16_atomic, "w", uint16_t, "=r") +build_read_atomic(read_u32_atomic, "l", uint32_t, "=r") +build_read_atomic(read_u64_atomic, "q", uint64_t, "=r") + +build_write_atomic(write_u8_atomic, "b", uint8_t, "q") +build_write_atomic(write_u16_atomic, "w", uint16_t, "r") +build_write_atomic(write_u32_atomic, "l", uint32_t, "r") +build_write_atomic(write_u64_atomic, "q", uint64_t, "r") + +build_add_sized(add_u8_sized, "b", uint8_t, "qi") +build_add_sized(add_u16_sized, "w", uint16_t, "ri") +build_add_sized(add_u32_sized, "l", uint32_t, "ri") +build_add_sized(add_u64_sized, "q", uint64_t, "ri") + +#undef build_read_atomic +#undef build_write_atomic +#undef build_add_sized + +void __bad_atomic_size(void); + +#define read_atomic(p) ({ \ + unsigned long x_; \ + CLANG_DISABLE_WARN_GCC_COMPAT_START \ + switch ( sizeof(*(p)) ) { \ + 
case 1: x_ = read_u8_atomic((uint8_t *)(p)); break; \ + case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \ + case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \ + case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \ + default: x_ = 0; __bad_atomic_size(); break; \ + } \ + CLANG_DISABLE_WARN_GCC_COMPAT_END \ + (typeof(*(p)))x_; \ +}) + +#define write_atomic(p, x) ({ \ + typeof(*(p)) __x = (x); \ + /* Check that the pointer is not a const type */ \ + void *__maybe_unused p_ = &__x; \ + unsigned long x_ = (unsigned long)__x; \ + switch ( sizeof(*(p)) ) { \ + case 1: write_u8_atomic((uint8_t *)(p), x_); break; \ + case 2: write_u16_atomic((uint16_t *)(p), x_); break; \ + case 4: write_u32_atomic((uint32_t *)(p), x_); break; \ + case 8: write_u64_atomic((uint64_t *)(p), x_); break; \ + default: __bad_atomic_size(); break; \ + } \ +}) + +#define add_sized(p, x) ({ \ + typeof(*(p)) x_ = (x); \ + switch ( sizeof(*(p)) ) \ + { \ + case 1: add_u8_sized((uint8_t *)(p), x_); break; \ + case 2: add_u16_sized((uint16_t *)(p), x_); break; \ + case 4: add_u32_sized((uint32_t *)(p), x_); break; \ + case 8: add_u64_sized((uint64_t *)(p), x_); break; \ + default: __bad_atomic_size(); break; \ + } \ +}) + +static inline int atomic_read(const atomic_t *v) +{ + return read_atomic(&v->counter); +} + +static inline int _atomic_read(atomic_t v) +{ + return v.counter; +} + +static inline void atomic_set(atomic_t *v, int i) +{ + write_atomic(&v->counter, i); +} + +static inline void _atomic_set(atomic_t *v, int i) +{ + v->counter = i; +} + +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + +static inline void atomic_add(int i, atomic_t *v) +{ + asm volatile ( + "lock; addl %1,%0" + : "=m" (*(volatile int *)&v->counter) + : "ir" (i), "m" (*(volatile int *)&v->counter) ); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + return i + arch_fetch_and_add(&v->counter, i); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + asm volatile ( + "lock; subl %1,%0" + : "=m" (*(volatile int *)&v->counter) + : "ir" (i), "m" (*(volatile int *)&v->counter) ); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + return arch_fetch_and_add(&v->counter, -i) - i; +} + +static inline int atomic_sub_and_test(int i, atomic_t *v) +{ + bool c; + + asm volatile ( "lock; subl %[i], %[counter]\n\t" + ASM_FLAG_OUT(, "setz %[zf]\n\t") + : [counter] "+m" (*(volatile int *)&v->counter), + [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) + : [i] "ir" (i) : "memory" ); + + return c; +} + +static inline void atomic_inc(atomic_t *v) +{ + asm volatile ( + "lock; incl %0" + : "=m" (*(volatile int *)&v->counter) + : "m" (*(volatile int *)&v->counter) ); +} + +static inline int atomic_inc_return(atomic_t *v) +{ + return atomic_add_return(1, v); +} + +static inline int atomic_inc_and_test(atomic_t *v) +{ + bool c; + + asm volatile ( "lock; incl %[counter]\n\t" + ASM_FLAG_OUT(, "setz %[zf]\n\t") + : [counter] "+m" (*(volatile int *)&v->counter), + [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) + :: "memory" ); + + return c; +} + +static inline void atomic_dec(atomic_t *v) +{ + asm volatile ( + "lock; decl %0" + : "=m" (*(volatile int *)&v->counter) + : "m" (*(volatile int *)&v->counter) ); +} + +static inline int atomic_dec_return(atomic_t *v) +{ + return atomic_sub_return(1, v); +} + +static inline int atomic_dec_and_test(atomic_t *v) +{ + bool c; + + asm volatile ( "lock; decl %[counter]\n\t" + ASM_FLAG_OUT(, "setz %[zf]\n\t") + : [counter] "+m" (*(volatile int 
*)&v->counter), + [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) + :: "memory" ); + + return c; +} + +static inline int atomic_add_negative(int i, atomic_t *v) +{ + bool c; + + asm volatile ( "lock; addl %[i], %[counter]\n\t" + ASM_FLAG_OUT(, "sets %[sf]\n\t") + : [counter] "+m" (*(volatile int *)&v->counter), + [sf] ASM_FLAG_OUT("=@ccs", "=qm") (c) + : [i] "ir" (i) : "memory" ); + + return c; +} + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c; +} + +static inline void atomic_and(int m, atomic_t *v) +{ + asm volatile ( + "lock andl %1, %0" + : "+m" (*(volatile int *)&v->counter) + : "ir" (m) ); +} + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +#endif /* __ARCH_X86_ATOMIC__ */ diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h new file mode 100644 index 0000000000..5a71afbc89 --- /dev/null +++ b/xen/arch/x86/include/asm/bitops.h @@ -0,0 +1,483 @@ +#ifndef _X86_BITOPS_H +#define _X86_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + */ + +#include +#include + +/* + * We specify the memory operand as both input and output because the memory + * operand is both read from and written to. Since the operand is in fact a + * word array, we also specify "memory" in the clobbers list to indicate that + * words other than the one directly addressed by the memory operand may be + * modified. + */ + +#define ADDR (*(volatile int *) addr) +#define CONST_ADDR (*(const volatile int *) addr) + +extern void __bitop_bad_size(void); +#define bitop_bad_size(addr) (sizeof(*(addr)) < 4) + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile void *addr) +{ + asm volatile ( "lock; btsl %1,%0" + : "+m" (ADDR) : "Ir" (nr) : "memory"); +} +#define set_bit(nr, addr) ({ \ + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ + set_bit(nr, addr); \ +}) + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void variable_set_bit(int nr, void *addr) +{ + asm volatile ( "btsl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" ); +} +static inline void constant_set_bit(int nr, void *addr) +{ + ((unsigned int *)addr)[nr >> 5] |= (1u << (nr & 31)); +} +#define __set_bit(nr, addr) ({ \ + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ + __builtin_constant_p(nr) ? \ + constant_set_bit(nr, addr) : \ + variable_set_bit(nr, addr); \ +}) + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. 
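+ * See __clear_bit() if you do not require the atomic guarantees.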
+ */
+static inline void clear_bit(int nr, volatile void *addr)
+{
+    asm volatile ( "lock; btrl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
+}
+#define clear_bit(nr, addr) ({                          \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    clear_bit(nr, addr);                                \
+})
+
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void variable_clear_bit(int nr, void *addr)
+{
+    asm volatile ( "btrl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
+}
+static inline void constant_clear_bit(int nr, void *addr)
+{
+    ((unsigned int *)addr)[nr >> 5] &= ~(1u << (nr & 31));
+}
+#define __clear_bit(nr, addr) ({                        \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __builtin_constant_p(nr) ?                          \
+        constant_clear_bit(nr, addr) :                  \
+        variable_clear_bit(nr, addr);                   \
+})
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void variable_change_bit(int nr, void *addr)
+{
+    asm volatile ( "btcl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
+}
+static inline void constant_change_bit(int nr, void *addr)
+{
+    ((unsigned int *)addr)[nr >> 5] ^= (1u << (nr & 31));
+}
+#define __change_bit(nr, addr) ({                       \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __builtin_constant_p(nr) ?                          \
+        constant_change_bit(nr, addr) :                 \
+        variable_change_bit(nr, addr);                  \
+})
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile void *addr)
+{
+    asm volatile ( "lock; btcl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
+}
+#define change_bit(nr, addr) ({                         \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    change_bit(nr, addr);                               \
+})
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile ( "lock; btsl %[nr], %[addr]\n\t"
+                   ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
+                   : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
+                     [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
+
+    return oldbit;
+}
+#define test_and_set_bit(nr, addr) ({                   \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    test_and_set_bit(nr, addr);                         \
+})
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
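
A sketch of how the atomic and non-atomic ("__"-prefixed) variants divide the work (the flag word and its use are illustrative; cpu_relax() is the usual pause helper):

    static unsigned long flags;          /* hypothetical word of flag bits */
    #define OBJ_BUSY 0

    static void claim(void)
    {
        /* Atomic: two CPUs racing here cannot both observe the bit clear. */
        while ( test_and_set_bit(OBJ_BUSY, &flags) )
            cpu_relax();
    }

    static void release(void)
    {
        clear_bit(OBJ_BUSY, &flags);     /* atomic; pairs with claim() */
    }

The __set_bit()/__clear_bit() forms compile to the same bts/btr without the lock prefix, so they are only suitable when the word is private or access is already serialised.
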
+ */
+static inline int __test_and_set_bit(int nr, void *addr)
+{
+    int oldbit;
+
+    asm volatile ( "btsl %[nr], %[addr]\n\t"
+                   ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
+                   : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
+                     [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" );
+
+    return oldbit;
+}
+#define __test_and_set_bit(nr, addr) ({                 \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_set_bit(nr, addr);                       \
+})
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+    int oldbit;
+
+    asm volatile ( "lock; btrl %[nr], %[addr]\n\t"
+                   ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
+                   : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
+                     [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
+
+    return oldbit;
+}
+#define test_and_clear_bit(nr, addr) ({                 \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    test_and_clear_bit(nr, addr);                       \
+})
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, void *addr)
+{
+    int oldbit;
+
+    asm volatile ( "btrl %[nr], %[addr]\n\t"
+                   ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
+                   : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
+                     [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" );
+
+    return oldbit;
+}
+#define __test_and_clear_bit(nr, addr) ({               \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_clear_bit(nr, addr);                     \
+})
+
+/* WARNING: non-atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr, void *addr)
+{
+    int oldbit;
+
+    asm volatile ( "btcl %[nr], %[addr]\n\t"
+                   ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
+                   : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
+                     [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" );
+
+    return oldbit;
+}
+#define __test_and_change_bit(nr, addr) ({              \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    __test_and_change_bit(nr, addr);                    \
+})
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */ +static inline int test_and_change_bit(int nr, volatile void *addr) +{ + int oldbit; + + asm volatile ( "lock; btcl %[nr], %[addr]\n\t" + ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") + : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), + [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" ); + + return oldbit; +} +#define test_and_change_bit(nr, addr) ({ \ + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ + test_and_change_bit(nr, addr); \ +}) + +static inline int constant_test_bit(int nr, const volatile void *addr) +{ + return ((1U << (nr & 31)) & + (((const volatile unsigned int *)addr)[nr >> 5])) != 0; +} + +static inline int variable_test_bit(int nr, const volatile void *addr) +{ + int oldbit; + + asm volatile ( "btl %[nr], %[addr]\n\t" + ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") + : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit) + : [addr] "m" (CONST_ADDR), [nr] "Ir" (nr) : "memory" ); + + return oldbit; +} + +#define test_bit(nr, addr) ({ \ + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ + __builtin_constant_p(nr) ? \ + constant_test_bit(nr, addr) : \ + variable_test_bit(nr, addr); \ +}) + +extern unsigned int __find_first_bit( + const unsigned long *addr, unsigned int size); +extern unsigned int __find_next_bit( + const unsigned long *addr, unsigned int size, unsigned int offset); +extern unsigned int __find_first_zero_bit( + const unsigned long *addr, unsigned int size); +extern unsigned int __find_next_zero_bit( + const unsigned long *addr, unsigned int size, unsigned int offset); + +static always_inline unsigned int __scanbit(unsigned long val, unsigned int max) +{ + if ( __builtin_constant_p(max) && max == BITS_PER_LONG ) + alternative_io("bsf %[in],%[out]; cmovz %[max],%k[out]", + "rep; bsf %[in],%[out]", + X86_FEATURE_BMI1, + [out] "=&r" (val), + [in] "r" (val), [max] "r" (max)); + else + asm ( "bsf %1,%0 ; cmovz %2,%k0" + : "=&r" (val) : "r" (val), "r" (max) ); + return (unsigned int)val; +} + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ +#define find_first_bit(addr, size) find_next_bit(addr, size, 0) + +/** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +#define find_next_bit(addr, size, off) ({ \ + unsigned int r__; \ + const unsigned long *a__ = (addr); \ + unsigned int s__ = (size); \ + unsigned int o__ = (off); \ + if ( o__ >= s__ ) \ + r__ = s__; \ + else if ( __builtin_constant_p(size) && s__ <= BITS_PER_LONG ) \ + r__ = o__ + __scanbit(*(const unsigned long *)(a__) >> o__, s__); \ + else if ( __builtin_constant_p(off) && !o__ ) \ + r__ = __find_first_bit(a__, s__); \ + else \ + r__ = __find_next_bit(a__, s__, o__); \ + r__; \ +}) + +/** + * find_first_zero_bit - find the first zero bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first zero bit, not the number of the byte + * containing a bit. 
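
A minimal scan loop over the helpers above (hypothetical two-word bitmap), iterating all set bits the way typical callers do:

    unsigned long map[2] = { 0x11, 0x4 };   /* bits 0, 4 and 66 set */
    unsigned int bit;

    for ( bit = find_first_bit(map, 128);
          bit < 128;
          bit = find_next_bit(map, 128, bit + 1) )
        printk("bit %u set\n", bit);        /* prints 0, 4, 66 */
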
+ */ +#define find_first_zero_bit(addr, size) find_next_zero_bit(addr, size, 0) + +/** + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +#define find_next_zero_bit(addr, size, off) ({ \ + unsigned int r__; \ + const unsigned long *a__ = (addr); \ + unsigned int s__ = (size); \ + unsigned int o__ = (off); \ + if ( o__ >= s__ ) \ + r__ = s__; \ + else if ( __builtin_constant_p(size) && s__ <= BITS_PER_LONG ) \ + r__ = o__ + __scanbit(~*(const unsigned long *)(a__) >> o__, s__); \ + else if ( __builtin_constant_p(off) && !o__ ) \ + r__ = __find_first_zero_bit(a__, s__); \ + else \ + r__ = __find_next_zero_bit(a__, s__, o__); \ + r__; \ +}) + +/** + * find_first_set_bit - find the first set bit in @word + * @word: the word to search + * + * Returns the bit-number of the first set bit. The input must *not* be zero. + */ +static inline unsigned int find_first_set_bit(unsigned long word) +{ + asm ( "rep; bsf %1,%0" : "=r" (word) : "rm" (word) ); + return (unsigned int)word; +} + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as the libc and compiler builtin ffs routines. + */ +static inline int ffsl(unsigned long x) +{ + long r; + + asm ( "bsf %1,%0\n\t" + "jnz 1f\n\t" + "mov $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return (int)r+1; +} + +static inline int ffs(unsigned int x) +{ + int r; + + asm ( "bsf %1,%0\n\t" + "jnz 1f\n\t" + "mov $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return r + 1; +} + +/** + * fls - find last bit set + * @x: the word to search + * + * This is defined the same way as ffs. + */ +static inline int flsl(unsigned long x) +{ + long r; + + asm ( "bsr %1,%0\n\t" + "jnz 1f\n\t" + "mov $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return (int)r+1; +} + +static inline int fls(unsigned int x) +{ + int r; + + asm ( "bsr %1,%0\n\t" + "jnz 1f\n\t" + "mov $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); + return r + 1; +} + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. 
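
A quick sketch of the conventions the ffs()/fls() family above encodes (worked values, expressed with Xen's ASSERT(); these are not compile-time constants because of the inline asm):

    ASSERT(ffs(0) == 0);      /* BSF finds nothing, r forced to -1, result 0 */
    ASSERT(ffs(0x8) == 4);    /* lowest set bit is bit 3, 1-based -> 4 */
    ASSERT(fls(0x8) == 4);    /* highest set bit is bit 3, 1-based -> 4 */
    ASSERT(find_first_set_bit(0x8) == 3);  /* 0-based; input must be non-zero */
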
+ */ +#define hweight64(x) generic_hweight64(x) +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +#endif /* _X86_BITOPS_H */ diff --git a/xen/arch/x86/include/asm/bug.h b/xen/arch/x86/include/asm/bug.h new file mode 100644 index 0000000000..9bb4a19420 --- /dev/null +++ b/xen/arch/x86/include/asm/bug.h @@ -0,0 +1,125 @@ +#ifndef __X86_BUG_H__ +#define __X86_BUG_H__ + +#define BUG_DISP_WIDTH 24 +#define BUG_LINE_LO_WIDTH (31 - BUG_DISP_WIDTH) +#define BUG_LINE_HI_WIDTH (31 - BUG_DISP_WIDTH) + +#define BUGFRAME_run_fn 0 +#define BUGFRAME_warn 1 +#define BUGFRAME_bug 2 +#define BUGFRAME_assert 3 + +#define BUGFRAME_NR 4 + +#ifndef __ASSEMBLY__ + +struct bug_frame { + signed int loc_disp:BUG_DISP_WIDTH; + unsigned int line_hi:BUG_LINE_HI_WIDTH; + signed int ptr_disp:BUG_DISP_WIDTH; + unsigned int line_lo:BUG_LINE_LO_WIDTH; + signed int msg_disp[]; +}; + +#define bug_loc(b) ((const void *)(b) + (b)->loc_disp) +#define bug_ptr(b) ((const void *)(b) + (b)->ptr_disp) +#define bug_line(b) (((((b)->line_hi + ((b)->loc_disp < 0)) & \ + ((1 << BUG_LINE_HI_WIDTH) - 1)) << \ + BUG_LINE_LO_WIDTH) + \ + (((b)->line_lo + ((b)->ptr_disp < 0)) & \ + ((1 << BUG_LINE_LO_WIDTH) - 1))) +#define bug_msg(b) ((const char *)(b) + (b)->msg_disp[1]) + +#define _ASM_BUGFRAME_TEXT(second_frame) \ + ".Lbug%=: ud2\n" \ + ".pushsection .bug_frames.%c[bf_type], \"a\", @progbits\n" \ + ".p2align 2\n" \ + ".Lfrm%=:\n" \ + ".long (.Lbug%= - .Lfrm%=) + %c[bf_line_hi]\n" \ + ".long (%c[bf_ptr] - .Lfrm%=) + %c[bf_line_lo]\n" \ + ".if " #second_frame "\n" \ + ".long 0, %c[bf_msg] - .Lfrm%=\n" \ + ".endif\n" \ + ".popsection\n" \ + +#define _ASM_BUGFRAME_INFO(type, line, ptr, msg) \ + [bf_type] "i" (type), \ + [bf_ptr] "i" (ptr), \ + [bf_msg] "i" (msg), \ + [bf_line_lo] "i" ((line & ((1 << BUG_LINE_LO_WIDTH) - 1)) \ + << BUG_DISP_WIDTH), \ + [bf_line_hi] "i" (((line) >> BUG_LINE_LO_WIDTH) << BUG_DISP_WIDTH) + +#define BUG_FRAME(type, line, ptr, second_frame, msg) do { \ + BUILD_BUG_ON((line) >> (BUG_LINE_LO_WIDTH + BUG_LINE_HI_WIDTH)); \ + BUILD_BUG_ON((type) >= BUGFRAME_NR); \ + asm volatile ( _ASM_BUGFRAME_TEXT(second_frame) \ + :: _ASM_BUGFRAME_INFO(type, line, ptr, msg) ); \ +} while (0) + + +#define WARN() BUG_FRAME(BUGFRAME_warn, __LINE__, __FILE__, 0, NULL) +#define BUG() do { \ + BUG_FRAME(BUGFRAME_bug, __LINE__, __FILE__, 0, NULL); \ + unreachable(); \ +} while (0) + +#define run_in_exception_handler(fn) BUG_FRAME(BUGFRAME_run_fn, 0, fn, 0, NULL) + +#define assert_failed(msg) do { \ + BUG_FRAME(BUGFRAME_assert, __LINE__, __FILE__, 1, msg); \ + unreachable(); \ +} while (0) + +extern const struct bug_frame __start_bug_frames[], + __stop_bug_frames_0[], + __stop_bug_frames_1[], + __stop_bug_frames_2[], + __stop_bug_frames_3[]; + +#else /* !__ASSEMBLY__ */ + +/* + * Construct a bugframe, suitable for using in assembly code. Should always + * match the C version above. 
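
To make the bug-frame line packing concrete: BUG_DISP_WIDTH is 24, leaving BUG_LINE_LO_WIDTH = BUG_LINE_HI_WIDTH = 7 bits, so __LINE__ values up to 2^14 - 1 = 16383 are representable. A worked example with a hypothetical line number:

    unsigned int line = 300;
    unsigned int lo = line & ((1 << BUG_LINE_LO_WIDTH) - 1); /* 300 % 128 = 44 */
    unsigned int hi = line >> BUG_LINE_LO_WIDTH;             /* 300 / 128 = 2 */

    /* Each .long stores a signed 24-bit displacement in bits 0-23 and a
     * 7-bit line fragment in bits 24-30; bug_line() reassembles
     * (hi << 7) + lo == (2 << 7) + 44 == 300, with the (disp < 0) terms
     * re-adding the borrow that a negative displacement takes from the
     * line bits. */
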
One complication is having to stash the strings
+ * in .rodata
+ */
+    .macro BUG_FRAME type, line, file_str, second_frame, msg
+
+    .if \type >= BUGFRAME_NR
+        .error "Invalid BUGFRAME index"
+    .endif
+
+    .L\@ud: ud2a
+
+    .pushsection .rodata.str1, "aMS", @progbits, 1
+         .L\@s1: .asciz "\file_str"
+    .popsection
+
+    .pushsection .bug_frames.\type, "a", @progbits
+        .p2align 2
+        .L\@bf:
+        .long (.L\@ud - .L\@bf) + \
+               ((\line >> BUG_LINE_LO_WIDTH) << BUG_DISP_WIDTH)
+        .long (.L\@s1 - .L\@bf) + \
+               ((\line & ((1 << BUG_LINE_LO_WIDTH) - 1)) << BUG_DISP_WIDTH)
+
+        .if \second_frame
+            .pushsection .rodata.str1, "aMS", @progbits, 1
+                .L\@s2: .asciz "\msg"
+            .popsection
+            .long 0, (.L\@s2 - .L\@bf)
+        .endif
+    .popsection
+    .endm
+
+#define WARN BUG_FRAME BUGFRAME_warn, __LINE__, __FILE__, 0, 0
+#define BUG BUG_FRAME BUGFRAME_bug, __LINE__, __FILE__, 0, 0
+
+#define ASSERT_FAILED(msg)                                      \
+     BUG_FRAME BUGFRAME_assert, __LINE__, __FILE__, 1, msg
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __X86_BUG_H__ */
diff --git a/xen/arch/x86/include/asm/byteorder.h b/xen/arch/x86/include/asm/byteorder.h
new file mode 100644
index 0000000000..1f77e502a5
--- /dev/null
+++ b/xen/arch/x86/include/asm/byteorder.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_X86_BYTEORDER_H__
+#define __ASM_X86_BYTEORDER_H__
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+    asm("bswap %0" : "=r" (x) : "0" (x));
+    return x;
+}
+
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{
+    union {
+        struct { __u32 a,b; } s;
+        __u64 u;
+    } v;
+    v.u = val;
+    asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+        : "=r" (v.s.a), "=r" (v.s.b)
+        : "0" (v.s.a), "1" (v.s.b));
+    return v.u;
+}
+
+/* Do not define swab16.  GCC is smart enough to recognize the "C" version
+   and convert it into a rotation or exchange.
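
For illustration, the effect of the two primitives on worked values:

    __u32 x = ___arch__swab32(0x12345678);            /* bswap -> 0x78563412 */
    __u64 y = ___arch__swab64(0x1122334455667788ULL);
    /* Each 32-bit half is byte-swapped, then the halves are exchanged:
     * y == 0x8877665544332211. */
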
*/ + +#define __arch__swab64(x) ___arch__swab64(x) +#define __arch__swab32(x) ___arch__swab32(x) + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* __ASM_X86_BYTEORDER_H__ */ diff --git a/xen/arch/x86/include/asm/bzimage.h b/xen/arch/x86/include/asm/bzimage.h new file mode 100644 index 0000000000..7ed69d3910 --- /dev/null +++ b/xen/arch/x86/include/asm/bzimage.h @@ -0,0 +1,11 @@ +#ifndef __X86_BZIMAGE_H__ +#define __X86_BZIMAGE_H__ + +#include + +unsigned long bzimage_headroom(void *image_start, unsigned long image_length); + +int bzimage_parse(void *image_base, void **image_start, + unsigned long *image_len); + +#endif /* __X86_BZIMAGE_H__ */ diff --git a/xen/arch/x86/include/asm/cache.h b/xen/arch/x86/include/asm/cache.h new file mode 100644 index 0000000000..1f7173d8c7 --- /dev/null +++ b/xen/arch/x86/include/asm/cache.h @@ -0,0 +1,14 @@ +/* + * include/asm-x86/cache.h + */ +#ifndef __ARCH_X86_CACHE_H +#define __ARCH_X86_CACHE_H + + +/* L1 cache line size */ +#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define __read_mostly __section(".data.read_mostly") + +#endif diff --git a/xen/arch/x86/include/asm/compat.h b/xen/arch/x86/include/asm/compat.h new file mode 100644 index 0000000000..818cad87db --- /dev/null +++ b/xen/arch/x86/include/asm/compat.h @@ -0,0 +1,20 @@ +/****************************************************************************** + * compat.h + */ + +#ifdef CONFIG_COMPAT + +#define COMPAT_BITS_PER_LONG 32 + +typedef uint32_t compat_ptr_t; +typedef unsigned long full_ptr_t; + +#endif + +struct domain; +#ifdef CONFIG_PV32 +int switch_compat(struct domain *); +#else +#include +static inline int switch_compat(struct domain *d) { return -EOPNOTSUPP; } +#endif diff --git a/xen/arch/x86/include/asm/config.h b/xen/arch/x86/include/asm/config.h new file mode 100644 index 0000000000..883c2ef0df --- /dev/null +++ b/xen/arch/x86/include/asm/config.h @@ -0,0 +1,329 @@ +/****************************************************************************** + * config.h + * + * A Linux-style configuration list. + */ + +#ifndef __X86_CONFIG_H__ +#define __X86_CONFIG_H__ + +#define LONG_BYTEORDER 3 +#define CONFIG_PAGING_LEVELS 4 + +#define BYTES_PER_LONG (1 << LONG_BYTEORDER) +#define BITS_PER_LONG (BYTES_PER_LONG << 3) +#define BITS_PER_BYTE 8 +#define POINTER_ALIGN BYTES_PER_LONG + +#define BITS_PER_LLONG 64 + +#define BITS_PER_XEN_ULONG BITS_PER_LONG + +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 +#define CONFIG_DISCONTIGMEM 1 +#define CONFIG_NUMA_EMU 1 +#define CONFIG_DOMAIN_PAGE 1 + +#define CONFIG_PAGEALLOC_MAX_ORDER (2 * PAGETABLE_ORDER) +#define CONFIG_DOMU_MAX_ORDER PAGETABLE_ORDER +#define CONFIG_HWDOM_MAX_ORDER 12 + +/* Intel P4 currently has largest cache line (L2 line size is 128 bytes). 
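
Spelling out the arithmetic behind the config.h constants above: LONG_BYTEORDER = 3 gives BYTES_PER_LONG = 1 << 3 = 8 and BITS_PER_LONG = 8 << 3 = 64. A compile-time restatement (sketch using Xen's BUILD_BUG_ON()):

    BUILD_BUG_ON(BYTES_PER_LONG != sizeof(long));   /* 1 << 3 == 8 */
    BUILD_BUG_ON(BITS_PER_LONG != 64);              /* 8 << 3 */
    BUILD_BUG_ON(POINTER_ALIGN != sizeof(void *));
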
*/ +#define CONFIG_X86_L1_CACHE_SHIFT 7 + +#define CONFIG_ACPI_NUMA 1 +#define CONFIG_ACPI_SRAT 1 +#define CONFIG_ACPI_CSTATE 1 + +#define CONFIG_WATCHDOG 1 + +#define CONFIG_MULTIBOOT 1 + +#define HZ 100 + +#define OPT_CONSOLE_STR "vga" + +/* Linkage for x86 */ +#ifdef __ASSEMBLY__ +#define ALIGN .align 16,0x90 +#define ENTRY(name) \ + .globl name; \ + ALIGN; \ + name: +#define GLOBAL(name) \ + .globl name; \ + name: +#endif + +#define NR_hypercalls 64 + +#ifndef NDEBUG +#define MEMORY_GUARD +#endif + +#define STACK_ORDER 3 +#define STACK_SIZE (PAGE_SIZE << STACK_ORDER) + +#define IST_SHSTK_SIZE 1024 + +#define TRAMPOLINE_STACK_SPACE PAGE_SIZE +#define TRAMPOLINE_SPACE (KB(64) - TRAMPOLINE_STACK_SPACE) +#define WAKEUP_STACK_MIN 3072 + +#define MBI_SPACE_MIN (2 * PAGE_SIZE) + +/* Primary stack is restricted to 8kB by guard pages. */ +#define PRIMARY_STACK_SIZE 8192 + +/* Primary shadow stack is slot 5 of 8, immediately under the primary stack. */ +#define PRIMARY_SHSTK_SLOT 5 + +/* Total size of syscall and emulation stubs. */ +#define STUB_BUF_SHIFT (L1_CACHE_SHIFT > 7 ? L1_CACHE_SHIFT : 7) +#define STUB_BUF_SIZE (1 << STUB_BUF_SHIFT) +#define STUBS_PER_PAGE (PAGE_SIZE / STUB_BUF_SIZE) + +/* Return value for zero-size _xmalloc(), distinguished from NULL. */ +#define ZERO_BLOCK_PTR ((void *)0xBAD0BAD0BAD0BAD0UL) + +/* Override include/xen/list.h to make these non-canonical addresses. */ +#define LIST_POISON1 ((void *)0x0100100100100100UL) +#define LIST_POISON2 ((void *)0x0200200200200200UL) + +#ifndef __ASSEMBLY__ +extern unsigned long trampoline_phys; +#define bootsym_phys(sym) \ + (((unsigned long)&(sym)-(unsigned long)&trampoline_start)+trampoline_phys) +#define bootsym(sym) \ + (*((typeof(sym) *)__va(bootsym_phys(sym)))) + +extern char trampoline_start[], trampoline_end[]; +extern char trampoline_realmode_entry[]; +extern unsigned int trampoline_xen_phys_start; +extern unsigned char trampoline_cpu_started; +extern char wakeup_start[]; + +extern unsigned char video_flags; + +extern unsigned short boot_edid_caps; +extern unsigned char boot_edid_info[128]; +#endif + +#include + +#define PML4_ENTRY_BITS 39 +#define PML4_ENTRY_BYTES (_AC(1,UL) << PML4_ENTRY_BITS) +#define PML4_ADDR(_slot) \ + (((_AC(_slot, UL) >> 8) * _AC(0xffff000000000000,UL)) | \ + (_AC(_slot, UL) << PML4_ENTRY_BITS)) + +/* + * Memory layout: + * 0x0000000000000000 - 0x00007fffffffffff [128TB, 2^47 bytes, PML4:0-255] + * Guest-defined use (see below for compatibility mode guests). + * 0x0000800000000000 - 0xffff7fffffffffff [16EB] + * Inaccessible: current arch only supports 48-bit sign-extended VAs. + * 0xffff800000000000 - 0xffff803fffffffff [256GB, 2^38 bytes, PML4:256] + * Read-only machine-to-phys translation table (GUEST ACCESSIBLE). + * 0xffff804000000000 - 0xffff807fffffffff [256GB, 2^38 bytes, PML4:256] + * Reserved for future shared info with the guest OS (GUEST ACCESSIBLE). + * 0xffff808000000000 - 0xffff80ffffffffff [512GB, 2^39 bytes, PML4:257] + * ioremap for PCI mmconfig space + * 0xffff810000000000 - 0xffff817fffffffff [512GB, 2^39 bytes, PML4:258] + * Guest linear page table. + * 0xffff818000000000 - 0xffff81ffffffffff [512GB, 2^39 bytes, PML4:259] + * Shadow linear page table. + * 0xffff820000000000 - 0xffff827fffffffff [512GB, 2^39 bytes, PML4:260] + * Per-domain mappings (e.g., GDT, LDT). + * 0xffff828000000000 - 0xffff82bfffffffff [256GB, 2^38 bytes, PML4:261] + * Machine-to-phys translation table. 
+ * 0xffff82c000000000 - 0xffff82cfffffffff [64GB, 2^36 bytes, PML4:261] + * vmap()/ioremap()/fixmap area. + * 0xffff82d000000000 - 0xffff82d03fffffff [1GB, 2^30 bytes, PML4:261] + * Compatibility machine-to-phys translation table (CONFIG_PV32). + * 0xffff82d040000000 - 0xffff82d07fffffff [1GB, 2^30 bytes, PML4:261] + * Xen text, static data, bss. +#ifndef CONFIG_BIGMEM + * 0xffff82d080000000 - 0xffff82dfffffffff [62GB, PML4:261] + * Reserved for future use. + * 0xffff82e000000000 - 0xffff82ffffffffff [128GB, 2^37 bytes, PML4:261] + * Page-frame information array. + * 0xffff830000000000 - 0xffff87ffffffffff [5TB, 5*2^40 bytes, PML4:262-271] + * 1:1 direct mapping of all physical memory. +#else + * 0xffff82d080000000 - 0xffff82ffffffffff [190GB, PML4:261] + * Reserved for future use. + * 0xffff830000000000 - 0xffff847fffffffff [1.5TB, 3*2^39 bytes, PML4:262-264] + * Page-frame information array. + * 0xffff848000000000 - 0xffff87ffffffffff [3.5TB, 7*2^39 bytes, PML4:265-271] + * 1:1 direct mapping of all physical memory. +#endif + * 0xffff880000000000 - 0xffffffffffffffff [120TB, PML4:272-511] + * PV: Guest-defined use. + * 0xffff880000000000 - 0xffffff7fffffffff [119.5TB, PML4:272-510] + * HVM/idle: continuation of 1:1 mapping + * 0xffffff8000000000 - 0xffffffffffffffff [512GB, 2^39 bytes PML4:511] + * HVM/idle: unused + * + * Compatibility guest area layout: + * 0x0000000000000000 - 0x00000000f57fffff [3928MB, PML4:0] + * Guest-defined use. + * 0x00000000f5800000 - 0x00000000ffffffff [168MB, PML4:0] + * Read-only machine-to-phys translation table (GUEST ACCESSIBLE). + * 0x0000000100000000 - 0x000001ffffffffff [2TB-4GB, PML4:0-3] + * Unused / Reserved for future use. + * 0x0000020000000000 - 0x0000027fffffffff [512GB, 2^39 bytes, PML4:4] + * Mirror of per-domain mappings (for argument translation area; also HVM). + * 0x0000028000000000 - 0x00007fffffffffff [125.5TB, PML4:5-255] + * Unused / Reserved for future use. + */ + + +#define ROOT_PAGETABLE_FIRST_XEN_SLOT 256 +#define ROOT_PAGETABLE_LAST_XEN_SLOT 271 +#define ROOT_PAGETABLE_XEN_SLOTS \ + (L4_PAGETABLE_ENTRIES - ROOT_PAGETABLE_FIRST_XEN_SLOT - 1) +#define ROOT_PAGETABLE_PV_XEN_SLOTS \ + (ROOT_PAGETABLE_LAST_XEN_SLOT - ROOT_PAGETABLE_FIRST_XEN_SLOT + 1) + +/* Hypervisor reserves PML4 slots 256 to 271 inclusive. */ +#define HYPERVISOR_VIRT_START (PML4_ADDR(256)) +#define HYPERVISOR_VIRT_END (HYPERVISOR_VIRT_START + PML4_ENTRY_BYTES*16) +/* Slot 256: read-only guest-accessible machine-to-phys translation table. */ +#define RO_MPT_VIRT_START (PML4_ADDR(256)) +#define MPT_VIRT_SIZE (PML4_ENTRY_BYTES / 2) +#define RO_MPT_VIRT_END (RO_MPT_VIRT_START + MPT_VIRT_SIZE) +/* Slot 257: ioremap for PCI mmconfig space for 2048 segments (512GB) + * - full 16-bit segment support needs 44 bits + * - since PML4 slot has 39 bits, we limit segments to 2048 (11-bits) + */ +#define PCI_MCFG_VIRT_START (PML4_ADDR(257)) +#define PCI_MCFG_VIRT_END (PCI_MCFG_VIRT_START + PML4_ENTRY_BYTES) +/* Slot 258: linear page table (guest table). */ +#define LINEAR_PT_VIRT_START (PML4_ADDR(258)) +#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES) +/* Slot 259: linear page table (shadow table). */ +#define SH_LINEAR_PT_VIRT_START (PML4_ADDR(259)) +#define SH_LINEAR_PT_VIRT_END (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES) +/* Slot 260: per-domain mappings (including map cache). 
*/ +#define PERDOMAIN_VIRT_START (PML4_ADDR(260)) +#define PERDOMAIN_SLOT_MBYTES (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER)) +#define PERDOMAIN_SLOTS 3 +#define PERDOMAIN_VIRT_SLOT(s) (PERDOMAIN_VIRT_START + (s) * \ + (PERDOMAIN_SLOT_MBYTES << 20)) +/* Slot 4: mirror of per-domain mappings (for compat xlat area accesses). */ +#define PERDOMAIN_ALT_VIRT_START PML4_ADDR(4) +/* Slot 261: machine-to-phys conversion table (256GB). */ +#define RDWR_MPT_VIRT_START (PML4_ADDR(261)) +#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + MPT_VIRT_SIZE) +/* Slot 261: vmap()/ioremap()/fixmap area (64GB). */ +#define VMAP_VIRT_START RDWR_MPT_VIRT_END +#define VMAP_VIRT_END (VMAP_VIRT_START + GB(64)) +/* Slot 261: compatibility machine-to-phys conversion table (1GB). */ +#define RDWR_COMPAT_MPT_VIRT_START VMAP_VIRT_END +#define RDWR_COMPAT_MPT_VIRT_END (RDWR_COMPAT_MPT_VIRT_START + GB(1)) +/* Slot 261: xen text, static data, bss, per-cpu stubs and executable fixmap (1GB). */ +#define XEN_VIRT_START RDWR_COMPAT_MPT_VIRT_END +#define XEN_VIRT_END (XEN_VIRT_START + GB(1)) + +#ifndef CONFIG_BIGMEM +/* Slot 261: page-frame information array (128GB). */ +#define FRAMETABLE_SIZE GB(128) +#else +/* Slot 262-264: page-frame information array (1.5TB). */ +#define FRAMETABLE_SIZE GB(1536) +#endif +#define FRAMETABLE_VIRT_END DIRECTMAP_VIRT_START +#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) +#define FRAMETABLE_VIRT_START (FRAMETABLE_VIRT_END - FRAMETABLE_SIZE) + +#ifndef CONFIG_BIGMEM +/* Slot 262-271/510: A direct 1:1 mapping of all of physical memory. */ +#define DIRECTMAP_VIRT_START (PML4_ADDR(262)) +#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES * (511 - 262)) +#else +/* Slot 265-271/510: A direct 1:1 mapping of all of physical memory. */ +#define DIRECTMAP_VIRT_START (PML4_ADDR(265)) +#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES * (511 - 265)) +#endif +#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_PV32 + +/* This is not a fixed value, just a lower limit. */ +#define __HYPERVISOR_COMPAT_VIRT_START 0xF5800000 +#define HYPERVISOR_COMPAT_VIRT_START(d) ((d)->arch.hv_compat_vstart) + +#else /* !CONFIG_PV32 */ + +#define HYPERVISOR_COMPAT_VIRT_START(d) ((void)(d), 0) + +#endif /* CONFIG_PV32 */ + +#define MACH2PHYS_COMPAT_VIRT_START HYPERVISOR_COMPAT_VIRT_START +#define MACH2PHYS_COMPAT_VIRT_END 0xFFE00000 +#define MACH2PHYS_COMPAT_NR_ENTRIES(d) \ + ((MACH2PHYS_COMPAT_VIRT_END-MACH2PHYS_COMPAT_VIRT_START(d))>>2) + +#define COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) \ + l2_table_offset(HYPERVISOR_COMPAT_VIRT_START(d)) +#define COMPAT_L2_PAGETABLE_LAST_XEN_SLOT l2_table_offset(~0U) +#define COMPAT_L2_PAGETABLE_XEN_SLOTS(d) \ + (COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) + 1) + +#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS +#define COMPAT_HAVE_PV_GUEST_ENTRY XEN_HAVE_PV_GUEST_ENTRY +#define COMPAT_HAVE_PV_UPCALL_MASK XEN_HAVE_PV_UPCALL_MASK + +#endif + +#define __HYPERVISOR_CS 0xe008 +#define __HYPERVISOR_DS64 0x0000 +#define __HYPERVISOR_DS32 0xe010 +#define __HYPERVISOR_DS __HYPERVISOR_DS64 + +#define SYMBOLS_ORIGIN XEN_VIRT_START + +/* For generic assembly code: use macros to define operation/operand sizes. */ +#define __OS "q" /* Operation Suffix */ +#define __OP "r" /* Operand Prefix */ + +#ifndef __ASSEMBLY__ +extern unsigned long xen_phys_start; +#endif + +/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. 
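
Stepping back to PML4_ADDR() and the address map above, a worked example: for slot 261, (261 >> 8) == 1, so the canonical-address sign extension is applied, and the slot index lands at bit 39 (PML4_ENTRY_BITS):

    /* 0xffff000000000000 | (261UL << 39) == 0xffff828000000000,
     * i.e. RDWR_MPT_VIRT_START in the layout comment above. */
    BUILD_BUG_ON(PML4_ADDR(261) != 0xffff828000000000UL);
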
*/ +#define GDT_LDT_VCPU_SHIFT 5 +#define GDT_LDT_VCPU_VA_SHIFT (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT) +#define GDT_LDT_MBYTES PERDOMAIN_SLOT_MBYTES +#define MAX_VIRT_CPUS (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT)) +#define GDT_LDT_VIRT_START PERDOMAIN_VIRT_SLOT(0) +#define GDT_LDT_VIRT_END (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20)) + +/* The address of a particular VCPU's GDT or LDT. */ +#define GDT_VIRT_START(v) \ + (PERDOMAIN_VIRT_START + ((v)->vcpu_id << GDT_LDT_VCPU_VA_SHIFT)) +#define LDT_VIRT_START(v) \ + (GDT_VIRT_START(v) + (64*1024)) + +/* map_domain_page() map cache. The second per-domain-mapping sub-area. */ +#define MAPCACHE_VCPU_ENTRIES (CONFIG_PAGING_LEVELS * CONFIG_PAGING_LEVELS) +#define MAPCACHE_ENTRIES (MAX_VIRT_CPUS * MAPCACHE_VCPU_ENTRIES) +#define MAPCACHE_VIRT_START PERDOMAIN_VIRT_SLOT(1) +#define MAPCACHE_VIRT_END (MAPCACHE_VIRT_START + \ + MAPCACHE_ENTRIES * PAGE_SIZE) + +/* Argument translation area. The third per-domain-mapping sub-area. */ +#define ARG_XLAT_VIRT_START PERDOMAIN_VIRT_SLOT(2) +/* Allow for at least one guard page (COMPAT_ARG_XLAT_SIZE being 2 pages): */ +#define ARG_XLAT_VA_SHIFT (2 + PAGE_SHIFT) +#define ARG_XLAT_START(v) \ + (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT)) + +#define ELFSIZE 64 + +#define ARCH_CRASH_SAVE_VMCOREINFO + +#endif /* __X86_CONFIG_H__ */ diff --git a/xen/arch/x86/include/asm/cpufeature.h b/xen/arch/x86/include/asm/cpufeature.h new file mode 100644 index 0000000000..4754940e23 --- /dev/null +++ b/xen/arch/x86/include/asm/cpufeature.h @@ -0,0 +1,214 @@ +/* + * cpufeature.h + * + * Defines x86 CPU feature bits + */ +#ifndef __ASM_I386_CPUFEATURE_H +#define __ASM_I386_CPUFEATURE_H + +#include +#include + +#define cpufeat_word(idx) ((idx) / 32) +#define cpufeat_bit(idx) ((idx) % 32) +#define cpufeat_mask(idx) (_AC(1, U) << cpufeat_bit(idx)) + +/* An alias of a feature we know is always going to be present. 
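
The cpufeat_*() helpers above decompose a flat feature index into a 32-bit capability word and a bit within it. A worked example with a hypothetical feature at overall bit 70:

    BUILD_BUG_ON(cpufeat_word(70) != 2);          /* 70 / 32: third word */
    BUILD_BUG_ON(cpufeat_bit(70) != 6);           /* 70 % 32 */
    BUILD_BUG_ON(cpufeat_mask(70) != (1U << 6));  /* bit 6 within that word */
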
*/ +#define X86_FEATURE_ALWAYS X86_FEATURE_LM + +#ifndef __ASSEMBLY__ +#include + +#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) +#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) + +#define CPUID_PM_LEAF 6 +#define CPUID6_ECX_APERFMPERF_CAPABILITY 0x1 + +/* CPUID level 0x00000001.edx */ +#define cpu_has_fpu 1 +#define cpu_has_de 1 +#define cpu_has_pse 1 +#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) +#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) +#define cpu_has_mtrr 1 +#define cpu_has_pge 1 +#define cpu_has_pse36 boot_cpu_has(X86_FEATURE_PSE36) +#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) +#define cpu_has_mmx 1 +#define cpu_has_htt boot_cpu_has(X86_FEATURE_HTT) + +/* CPUID level 0x00000001.ecx */ +#define cpu_has_sse3 boot_cpu_has(X86_FEATURE_SSE3) +#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) +#define cpu_has_monitor boot_cpu_has(X86_FEATURE_MONITOR) +#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMX) +#define cpu_has_eist boot_cpu_has(X86_FEATURE_EIST) +#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) +#define cpu_has_fma boot_cpu_has(X86_FEATURE_FMA) +#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) +#define cpu_has_pdcm boot_cpu_has(X86_FEATURE_PDCM) +#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID) +#define cpu_has_sse4_1 boot_cpu_has(X86_FEATURE_SSE4_1) +#define cpu_has_sse4_2 boot_cpu_has(X86_FEATURE_SSE4_2) +#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) +#define cpu_has_popcnt boot_cpu_has(X86_FEATURE_POPCNT) +#define cpu_has_aesni boot_cpu_has(X86_FEATURE_AESNI) +#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) +#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) +#define cpu_has_f16c boot_cpu_has(X86_FEATURE_F16C) +#define cpu_has_rdrand boot_cpu_has(X86_FEATURE_RDRAND) +#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) + +/* CPUID level 0x80000001.edx */ +#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) +#define cpu_has_page1gb boot_cpu_has(X86_FEATURE_PAGE1GB) +#define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP) +#define cpu_has_3dnow_ext boot_cpu_has(X86_FEATURE_3DNOWEXT) +#define cpu_has_3dnow boot_cpu_has(X86_FEATURE_3DNOW) + +/* CPUID level 0x80000001.ecx */ +#define cpu_has_cmp_legacy boot_cpu_has(X86_FEATURE_CMP_LEGACY) +#define cpu_has_svm boot_cpu_has(X86_FEATURE_SVM) +#define cpu_has_sse4a boot_cpu_has(X86_FEATURE_SSE4A) +#define cpu_has_xop boot_cpu_has(X86_FEATURE_XOP) +#define cpu_has_skinit boot_cpu_has(X86_FEATURE_SKINIT) +#define cpu_has_fma4 boot_cpu_has(X86_FEATURE_FMA4) +#define cpu_has_tbm boot_cpu_has(X86_FEATURE_TBM) + +/* CPUID level 0x0000000D:1.eax */ +#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) +#define cpu_has_xsavec boot_cpu_has(X86_FEATURE_XSAVEC) +#define cpu_has_xgetbv1 boot_cpu_has(X86_FEATURE_XGETBV1) +#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) + +/* CPUID level 0x00000007:0.ebx */ +#define cpu_has_bmi1 boot_cpu_has(X86_FEATURE_BMI1) +#define cpu_has_hle boot_cpu_has(X86_FEATURE_HLE) +#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) +#define cpu_has_smep boot_cpu_has(X86_FEATURE_SMEP) +#define cpu_has_bmi2 boot_cpu_has(X86_FEATURE_BMI2) +#define cpu_has_invpcid boot_cpu_has(X86_FEATURE_INVPCID) +#define cpu_has_rtm boot_cpu_has(X86_FEATURE_RTM) +#define cpu_has_pqe boot_cpu_has(X86_FEATURE_PQE) +#define cpu_has_fpu_sel (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL)) +#define cpu_has_mpx boot_cpu_has(X86_FEATURE_MPX) +#define cpu_has_avx512f boot_cpu_has(X86_FEATURE_AVX512F) +#define cpu_has_avx512dq 
boot_cpu_has(X86_FEATURE_AVX512DQ) +#define cpu_has_rdseed boot_cpu_has(X86_FEATURE_RDSEED) +#define cpu_has_smap boot_cpu_has(X86_FEATURE_SMAP) +#define cpu_has_avx512_ifma boot_cpu_has(X86_FEATURE_AVX512_IFMA) +#define cpu_has_clflushopt boot_cpu_has(X86_FEATURE_CLFLUSHOPT) +#define cpu_has_clwb boot_cpu_has(X86_FEATURE_CLWB) +#define cpu_has_avx512er boot_cpu_has(X86_FEATURE_AVX512ER) +#define cpu_has_avx512cd boot_cpu_has(X86_FEATURE_AVX512CD) +#define cpu_has_proc_trace boot_cpu_has(X86_FEATURE_PROC_TRACE) +#define cpu_has_sha boot_cpu_has(X86_FEATURE_SHA) +#define cpu_has_avx512bw boot_cpu_has(X86_FEATURE_AVX512BW) +#define cpu_has_avx512vl boot_cpu_has(X86_FEATURE_AVX512VL) + +/* CPUID level 0x00000007:0.ecx */ +#define cpu_has_avx512_vbmi boot_cpu_has(X86_FEATURE_AVX512_VBMI) +#define cpu_has_avx512_vbmi2 boot_cpu_has(X86_FEATURE_AVX512_VBMI2) +#define cpu_has_gfni boot_cpu_has(X86_FEATURE_GFNI) +#define cpu_has_vaes boot_cpu_has(X86_FEATURE_VAES) +#define cpu_has_vpclmulqdq boot_cpu_has(X86_FEATURE_VPCLMULQDQ) +#define cpu_has_avx512_vnni boot_cpu_has(X86_FEATURE_AVX512_VNNI) +#define cpu_has_avx512_bitalg boot_cpu_has(X86_FEATURE_AVX512_BITALG) +#define cpu_has_avx512_vpopcntdq boot_cpu_has(X86_FEATURE_AVX512_VPOPCNTDQ) +#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID) +#define cpu_has_movdiri boot_cpu_has(X86_FEATURE_MOVDIRI) +#define cpu_has_movdir64b boot_cpu_has(X86_FEATURE_MOVDIR64B) +#define cpu_has_enqcmd boot_cpu_has(X86_FEATURE_ENQCMD) + +/* CPUID level 0x80000007.edx */ +#define cpu_has_hw_pstate boot_cpu_has(X86_FEATURE_HW_PSTATE) +#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC) + +/* CPUID level 0x80000008.ebx */ +#define cpu_has_amd_ssbd boot_cpu_has(X86_FEATURE_AMD_SSBD) +#define cpu_has_virt_ssbd boot_cpu_has(X86_FEATURE_VIRT_SSBD) +#define cpu_has_ssb_no boot_cpu_has(X86_FEATURE_SSB_NO) + +/* CPUID level 0x00000007:0.edx */ +#define cpu_has_avx512_4vnniw boot_cpu_has(X86_FEATURE_AVX512_4VNNIW) +#define cpu_has_avx512_4fmaps boot_cpu_has(X86_FEATURE_AVX512_4FMAPS) +#define cpu_has_avx512_vp2intersect boot_cpu_has(X86_FEATURE_AVX512_VP2INTERSECT) +#define cpu_has_rtm_always_abort boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) +#define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) +#define cpu_has_serialize boot_cpu_has(X86_FEATURE_SERIALIZE) +#define cpu_has_arch_caps boot_cpu_has(X86_FEATURE_ARCH_CAPS) + +/* CPUID level 0x00000007:1.eax */ +#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI) +#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16) + +/* Synthesized. */ +#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) +#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING) +#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF) +#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH) +#define cpu_has_nscb boot_cpu_has(X86_FEATURE_NSCB) +#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR) +#define cpu_has_xen_shstk boot_cpu_has(X86_FEATURE_XEN_SHSTK) + +#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid) + +/* Bugs. 
*/ +#define cpu_bug_fpu_ptrs boot_cpu_has(X86_BUG_FPU_PTRS) +#define cpu_bug_null_seg boot_cpu_has(X86_BUG_NULL_SEG) + +enum _cache_type { + CACHE_TYPE_NULL = 0, + CACHE_TYPE_DATA = 1, + CACHE_TYPE_INST = 2, + CACHE_TYPE_UNIFIED = 3 +}; + +union _cpuid4_leaf_eax { + struct { + enum _cache_type type:5; + unsigned int level:3; + unsigned int is_self_initializing:1; + unsigned int is_fully_associative:1; + unsigned int reserved:4; + unsigned int num_threads_sharing:12; + unsigned int num_cores_on_die:6; + } split; + u32 full; +}; + +union _cpuid4_leaf_ebx { + struct { + unsigned int coherency_line_size:12; + unsigned int physical_line_partition:10; + unsigned int ways_of_associativity:10; + } split; + u32 full; +}; + +union _cpuid4_leaf_ecx { + struct { + unsigned int number_of_sets:32; + } split; + u32 full; +}; + +struct cpuid4_info { + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned long size; +}; + +int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf); +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_I386_CPUFEATURE_H */ + +/* + * Local Variables: + * mode:c + * comment-column:42 + * End: + */ diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h new file mode 100644 index 0000000000..b10154fc44 --- /dev/null +++ b/xen/arch/x86/include/asm/cpufeatures.h @@ -0,0 +1,51 @@ +/* + * Explicitly intended for multiple inclusion. + */ + +#include + +/* Number of capability words covered by the featureset words. */ +#define FSCAPINTS FEATURESET_NR_ENTRIES + +/* Synthetic words follow the featureset words. */ +#define X86_NR_SYNTH 1 +#define X86_SYNTH(x) (FSCAPINTS * 32 + (x)) + +/* Synthetic features */ +XEN_CPUFEATURE(CONSTANT_TSC, X86_SYNTH( 0)) /* TSC ticks at a constant rate */ +XEN_CPUFEATURE(NONSTOP_TSC, X86_SYNTH( 1)) /* TSC does not stop in C states */ +XEN_CPUFEATURE(ARAT, X86_SYNTH( 2)) /* Always running APIC timer */ +XEN_CPUFEATURE(ARCH_PERFMON, X86_SYNTH( 3)) /* Intel Architectural PerfMon */ +XEN_CPUFEATURE(TSC_RELIABLE, X86_SYNTH( 4)) /* TSC is known to be reliable */ +XEN_CPUFEATURE(XTOPOLOGY, X86_SYNTH( 5)) /* cpu topology enum extensions */ +XEN_CPUFEATURE(CPUID_FAULTING, X86_SYNTH( 6)) /* cpuid faulting */ +XEN_CPUFEATURE(CLFLUSH_MONITOR, X86_SYNTH( 7)) /* clflush reqd with monitor */ +XEN_CPUFEATURE(APERFMPERF, X86_SYNTH( 8)) /* APERFMPERF */ +XEN_CPUFEATURE(MFENCE_RDTSC, X86_SYNTH( 9)) /* MFENCE synchronizes RDTSC */ +XEN_CPUFEATURE(XEN_SMEP, X86_SYNTH(10)) /* SMEP gets used by Xen itself */ +XEN_CPUFEATURE(XEN_SMAP, X86_SYNTH(11)) /* SMAP gets used by Xen itself */ +/* Bit 12 - unused. 
*/ +XEN_CPUFEATURE(IND_THUNK_LFENCE, X86_SYNTH(13)) /* Use IND_THUNK_LFENCE */ +XEN_CPUFEATURE(IND_THUNK_JMP, X86_SYNTH(14)) /* Use IND_THUNK_JMP */ +XEN_CPUFEATURE(SC_NO_BRANCH_HARDEN, X86_SYNTH(15)) /* (Disable) Conditional branch hardening */ +XEN_CPUFEATURE(SC_MSR_PV, X86_SYNTH(16)) /* MSR_SPEC_CTRL used by Xen for PV */ +XEN_CPUFEATURE(SC_MSR_HVM, X86_SYNTH(17)) /* MSR_SPEC_CTRL used by Xen for HVM */ +XEN_CPUFEATURE(SC_RSB_PV, X86_SYNTH(18)) /* RSB overwrite needed for PV */ +XEN_CPUFEATURE(SC_RSB_HVM, X86_SYNTH(19)) /* RSB overwrite needed for HVM */ +XEN_CPUFEATURE(XEN_SELFSNOOP, X86_SYNTH(20)) /* SELFSNOOP gets used by Xen itself */ +XEN_CPUFEATURE(SC_MSR_IDLE, X86_SYNTH(21)) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */ +XEN_CPUFEATURE(XEN_LBR, X86_SYNTH(22)) /* Xen uses MSR_DEBUGCTL.LBR */ +XEN_CPUFEATURE(SC_VERW_PV, X86_SYNTH(23)) /* VERW used by Xen for PV */ +XEN_CPUFEATURE(SC_VERW_HVM, X86_SYNTH(24)) /* VERW used by Xen for HVM */ +XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */ +XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */ + +/* Bug words follow the synthetic words. */ +#define X86_NR_BUG 1 +#define X86_BUG(x) ((FSCAPINTS + X86_NR_SYNTH) * 32 + (x)) + +#define X86_BUG_FPU_PTRS X86_BUG( 0) /* (F)X{SAVE,RSTOR} doesn't save/restore FOP/FIP/FDP. */ +#define X86_BUG_NULL_SEG X86_BUG( 1) /* NULL-ing a selector preserves the base and limit. */ + +/* Total number of capability words, inc synth and bug words. */ +#define NCAPINTS (FSCAPINTS + X86_NR_SYNTH + X86_NR_BUG) /* N 32-bit words worth of info */ diff --git a/xen/arch/x86/include/asm/cpufeatureset.h b/xen/arch/x86/include/asm/cpufeatureset.h new file mode 100644 index 0000000000..f179229f19 --- /dev/null +++ b/xen/arch/x86/include/asm/cpufeatureset.h @@ -0,0 +1,40 @@ +#ifndef __XEN_X86_CPUFEATURESET_H__ +#define __XEN_X86_CPUFEATURESET_H__ + +#ifndef __ASSEMBLY__ + +#include + +#define XEN_CPUFEATURE(name, value) X86_FEATURE_##name = value, +enum { +#include +#include +}; +#undef XEN_CPUFEATURE + +#define XEN_CPUFEATURE(name, value) asm (".equ X86_FEATURE_" #name ", " \ + __stringify(value)); +#include +#include + +#else /* !__ASSEMBLY__ */ + +#define XEN_CPUFEATURE(name, value) .equ X86_FEATURE_##name, value +#include +#include + +#endif /* __ASSEMBLY__ */ + +#undef XEN_CPUFEATURE + +#endif /* !__XEN_X86_CPUFEATURESET_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/cpuid.h b/xen/arch/x86/include/asm/cpuid.h new file mode 100644 index 0000000000..46904061d0 --- /dev/null +++ b/xen/arch/x86/include/asm/cpuid.h @@ -0,0 +1,80 @@ +#ifndef __X86_CPUID_H__ +#define __X86_CPUID_H__ + +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include + +#include +#include + +#include + +extern const uint32_t known_features[FSCAPINTS]; + +void init_guest_cpuid(void); + +/* + * Expected levelling capabilities (given cpuid vendor/family information), + * and levelling capabilities actually available (given MSR probing). 
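
Restating the cpufeatures.h numbering scheme above as a sketch: synthetic bits start immediately after the featureset words, and bug bits immediately after the synthetic word, so with X86_NR_SYNTH == 1 and X86_NR_BUG == 1:

    BUILD_BUG_ON(X86_SYNTH(0) != FSCAPINTS * 32);
    BUILD_BUG_ON(X86_BUG(0) != X86_SYNTH(0) + 32);
    BUILD_BUG_ON(NCAPINTS != FSCAPINTS + 2);  /* featureset + synth + bug */
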
+ */ +#define LCAP_faulting XEN_SYSCTL_CPU_LEVELCAP_faulting +#define LCAP_1cd (XEN_SYSCTL_CPU_LEVELCAP_ecx | \ + XEN_SYSCTL_CPU_LEVELCAP_edx) +#define LCAP_e1cd (XEN_SYSCTL_CPU_LEVELCAP_extd_ecx | \ + XEN_SYSCTL_CPU_LEVELCAP_extd_edx) +#define LCAP_Da1 XEN_SYSCTL_CPU_LEVELCAP_xsave_eax +#define LCAP_6c XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx +#define LCAP_7ab0 (XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax | \ + XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx) +extern unsigned int expected_levelling_cap, levelling_caps; + +struct cpuidmasks +{ + uint64_t _1cd; + uint64_t e1cd; + uint64_t Da1; + uint64_t _6c; + uint64_t _7ab0; +}; + +/* Per CPU shadows of masking MSR values, for lazy context switching. */ +DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks); + +/* Default masking MSR values, calculated at boot. */ +extern struct cpuidmasks cpuidmask_defaults; + +extern struct cpuid_policy raw_cpuid_policy, host_cpuid_policy, + pv_max_cpuid_policy, pv_def_cpuid_policy, + hvm_max_cpuid_policy, hvm_def_cpuid_policy; + +extern const struct cpu_policy system_policies[]; + +/* Check that all previously present features are still available. */ +bool recheck_cpu_features(unsigned int cpu); + +/* Allocate and initialise a CPUID policy suitable for the domain. */ +int init_domain_cpuid_policy(struct domain *d); + +/* Clamp the CPUID policy to reality. */ +void recalculate_cpuid_policy(struct domain *d); + +struct vcpu; +void guest_cpuid(const struct vcpu *v, uint32_t leaf, + uint32_t subleaf, struct cpuid_leaf *res); + +#endif /* __ASSEMBLY__ */ +#endif /* !__X86_CPUID_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/cpuidle.h b/xen/arch/x86/include/asm/cpuidle.h new file mode 100644 index 0000000000..0981a8fd64 --- /dev/null +++ b/xen/arch/x86/include/asm/cpuidle.h @@ -0,0 +1,31 @@ +#ifndef __ASM_X86_CPUIDLE_H__ +#define __ASM_X86_CPUIDLE_H__ + +#include +#include +#include + +extern struct acpi_processor_power *processor_powers[]; + +extern void (*pm_idle_save)(void); + +bool lapic_timer_init(void); +extern void (*lapic_timer_off)(void); +extern void (*lapic_timer_on)(void); + +extern uint64_t (*cpuidle_get_tick)(void); + +int mwait_idle_init(struct notifier_block *); +int cpuidle_init_cpu(unsigned int cpu); +void default_dead_idle(void); +void acpi_dead_idle(void); +void play_dead(void); +void trace_exit_reason(u32 *irq_traced); +void update_idle_stats(struct acpi_processor_power *, + struct acpi_processor_cx *, uint64_t, uint64_t); +void update_last_cx_stat(struct acpi_processor_power *, + struct acpi_processor_cx *, uint64_t); + +bool errata_c6_workaround(void); + +#endif /* __X86_ASM_CPUIDLE_H__ */ diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h new file mode 100644 index 0000000000..cfbedc3198 --- /dev/null +++ b/xen/arch/x86/include/asm/current.h @@ -0,0 +1,210 @@ +/****************************************************************************** + * current.h + * + * Information structure that lives at the bottom of the per-cpu Xen stack. 
+ */
+
+#ifndef __X86_CURRENT_H__
+#define __X86_CURRENT_H__
+
+#include <xen/percpu.h>
+#include <public/xen.h>
+#include <asm/page.h>
+
+/*
+ * Xen's cpu stacks are 8 pages (8-page aligned), arranged as:
+ *
+ * 7 - Primary stack (with a struct cpu_info at the top)
+ * 6 - Primary stack
+ * 5 - Primary Shadow Stack (read-only)
+ * 4 - #DF IST stack
+ * 3 - #DB IST stack
+ * 2 - NMI IST stack
+ * 1 - #MC IST stack
+ * 0 - IST Shadow Stacks (4x 1k, read-only)
+ */
+
+/*
+ * Identify which stack page the stack pointer is on.  Returns an index
+ * as per the comment above.
+ */
+static inline unsigned int get_stack_page(unsigned long sp)
+{
+    return (sp & (STACK_SIZE-1)) >> PAGE_SHIFT;
+}
+
+struct vcpu;
+
+struct cpu_info {
+    struct cpu_user_regs guest_cpu_user_regs;
+    unsigned int processor_id;
+    unsigned int verw_sel;
+    struct vcpu *current_vcpu;
+    unsigned long per_cpu_offset;
+    unsigned long cr4;
+    /*
+     * Of the two following fields the latter is being set to the CR3 value
+     * to be used on the given pCPU for loading whenever 64-bit PV guest
+     * context is being entered. A value of zero indicates no setting of CR3
+     * is to be performed.
+     * The former is the value to restore when re-entering Xen, if any. IOW
+     * its value being zero means there's nothing to restore.
+     */
+    unsigned long xen_cr3;
+    unsigned long pv_cr3;
+
+    /* See asm/spec_ctrl_asm.h for usage. */
+    unsigned int shadow_spec_ctrl;
+    uint8_t      xen_spec_ctrl;
+    uint8_t      spec_ctrl_flags;
+
+    /*
+     * The following field controls copying of the L4 page table of 64-bit
+     * PV guests to the per-cpu root page table on entering the guest context.
+     * If set the L4 page table is being copied to the root page table and
+     * the field will be reset.
+     */
+    bool         root_pgt_changed;
+
+    /*
+     * use_pv_cr3 is set in case the value of pv_cr3 is to be written into
+     * CR3 when returning from an interrupt. The main use is when returning
+     * from a NMI or MCE to hypervisor code where pv_cr3 was active.
+     */
+    bool         use_pv_cr3;
+
+    unsigned long __pad;
+    /* get_stack_bottom() must be 16-byte aligned */
+};
+
+static inline struct cpu_info *get_cpu_info_from_stack(unsigned long sp)
+{
+    return (struct cpu_info *)((sp | (STACK_SIZE - 1)) + 1) - 1;
+}
+
+static inline struct cpu_info *get_cpu_info(void)
+{
+#ifdef __clang__
+    /* Clang complains that sp in the else case is not initialised. */
+    unsigned long sp;
+    asm ( "mov %%rsp, %0" : "=r" (sp) );
+#else
+    register unsigned long sp asm("rsp");
+#endif
+
+    return get_cpu_info_from_stack(sp);
+}
+
+#define get_current()         (get_cpu_info()->current_vcpu)
+#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
+#define current               (get_current())
+
+#define get_processor_id()    (get_cpu_info()->processor_id)
+#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+
+/*
+ * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
+ * into the middle of cpu_info.guest_cpu_user_regs, at the section that
+ * precisely corresponds to a CPU trap frame.
+ */
+#define get_stack_bottom()                      \
+    ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es)
+
+/*
+ * Get the reasonable stack bounds for stack traces and stack dumps.  Stack
+ * dumps have a slightly larger range to include exception frames in the
+ * printed information.  The returned word is inside the interesting range.
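
A worked example of the stack arithmetic (addresses hypothetical; STACK_SIZE is 8 pages = 0x8000 here):

    unsigned long sp = 0xffff830000017e40UL;   /* somewhere in page 7 */

    ASSERT(get_stack_page(sp) == 7);           /* (sp & 0x7fff) >> PAGE_SHIFT */

    /* (sp | (STACK_SIZE - 1)) + 1 rounds up to the stack end,
     * 0xffff830000018000; subtracting one struct cpu_info then lands on
     * the block sitting at the very top of the primary stack. */
    struct cpu_info *ci = get_cpu_info_from_stack(sp);
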
+ */
+unsigned long get_stack_trace_bottom(unsigned long sp);
+unsigned long get_stack_dump_bottom (unsigned long sp);
+
+#ifdef CONFIG_LIVEPATCH
+# define CHECK_FOR_LIVEPATCH_WORK "call check_for_livepatch_work;"
+#elif defined(CONFIG_DEBUG)
+/* Mimic the clobbering effect a call has on registers. */
+# define CHECK_FOR_LIVEPATCH_WORK                       \
+    "mov $0x1234567890abcdef, %%rax\n\t"                \
+    "mov %%rax, %%rcx; mov %%rax, %%rdx\n\t"            \
+    "mov %%rax, %%rsi; mov %%rax, %%rdi\n\t"            \
+    "mov %%rax, %%r8;  mov %%rax, %%r9\n\t"             \
+    "mov %%rax, %%r10; mov %%rax, %%r11\n\t"
+#else
+# define CHECK_FOR_LIVEPATCH_WORK ""
+#endif
+
+#ifdef CONFIG_XEN_SHSTK
+/*
+ * We need to unwind the primary shadow stack to its supervisor token,
+ * located in the last word of the primary shadow stack.
+ *
+ * Read the shadow stack pointer, subtract it from supervisor token position,
+ * and divide by 8 to get the number of slots needing popping.
+ *
+ * INCSSPQ can't pop more than 255 entries.  We shouldn't ever need to pop
+ * that many entries, and getting this wrong will cause us to #DF later.  Turn
+ * it into a BUG() now for fractionally easier debugging.
+ */
+# define SHADOW_STACK_WORK                                      \
+    "mov $1, %[ssp];"                                           \
+    "rdsspd %[ssp];"                                            \
+    "cmp $1, %[ssp];"                                           \
+    "je .L_shstk_done.%=;" /* CET not active?  Skip. */         \
+    "mov $%c[skstk_base], %[val];"                              \
+    "and $%c[stack_mask], %[ssp];"                              \
+    "sub %[ssp], %[val];"                                       \
+    "shr $3, %[val];"                                           \
+    "cmp $255, %[val];" /* More than 255 entries?  Crash. */    \
+    UNLIKELY_START(a, shstk_adjust)                             \
+    _ASM_BUGFRAME_TEXT(0)                                       \
+    UNLIKELY_END_SECTION ";"                                    \
+    "incsspq %q[val];"                                          \
+    ".L_shstk_done.%=:"
+#else
+# define SHADOW_STACK_WORK ""
+#endif
+
+#if __GNUC__ >= 9
+# define ssaj_has_attr_noreturn(fn) __builtin_has_attribute(fn, __noreturn__)
+#else
+/* Simply can't check the property with older gcc. */
+# define ssaj_has_attr_noreturn(fn) true
+#endif
+
+#define switch_stack_and_jump(fn, instr, constr)                \
+    ({                                                          \
+        unsigned int tmp;                                       \
+        (void)((fn) == (void (*)(void))NULL);                   \
+        BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn));              \
+        __asm__ __volatile__ (                                  \
+            SHADOW_STACK_WORK                                   \
+            "mov %[stk], %%rsp;"                                \
+            CHECK_FOR_LIVEPATCH_WORK                            \
+            instr "[fun]"                                       \
+            : [val] "=&r" (tmp),                                \
+              [ssp] "=&r" (tmp)                                 \
+            : [stk] "r" (guest_cpu_user_regs()),                \
+              [fun] constr (fn),                                \
+              [skstk_base] "i"                                  \
+              ((PRIMARY_SHSTK_SLOT + 1) * PAGE_SIZE - 8),       \
+              [stack_mask] "i" (STACK_SIZE - 1),                \
+              _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__,        \
+                                 __FILE__, NULL)                \
+            : "memory" );                                       \
+        unreachable();                                          \
+    })
+
+#define reset_stack_and_jump(fn)                                \
+    switch_stack_and_jump(fn, "jmp %c", "i")
+
+/* The constraint may only specify non-call-clobbered registers. */
+#define reset_stack_and_jump_ind(fn)                            \
+    switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
+
+/*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necessarily the same as 'current' as a CPU may be
+ * executing a lazy state switch.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+
+#endif /* __X86_CURRENT_H__ */
diff --git a/xen/arch/x86/include/asm/debugger.h b/xen/arch/x86/include/asm/debugger.h
new file mode 100644
index 0000000000..99803bfd0c
--- /dev/null
+++ b/xen/arch/x86/include/asm/debugger.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ * asm/debugger.h
+ *
+ * Generic hooks into arch-dependent Xen.
+ *
+ * Each debugger should define three functions here:
+ *
+ * 1. debugger_trap_entry():
+ * Called at start of any synchronous fault or trap, before any other work
+ * is done.
The idea is that if your debugger deliberately caused the trap + * (e.g. to implement breakpoints or data watchpoints) then you can take + * appropriate action and return a non-zero value to cause early exit from + * the trap function. + * + * 2. debugger_trap_fatal(): + * Called when Xen is about to give up and crash. Typically you will use this + * hook to drop into a debug session. It can also be used to hook off + * deliberately caused traps (which you then handle and return non-zero). + * + * 3. debugger_trap_immediate(): + * Called if we want to drop into a debugger now. This is essentially the + * same as debugger_trap_fatal, except that we use the current register state + * rather than the state which was in effect when we took the trap. + * For example: if we're dying because of an unhandled exception, we call + * debugger_trap_fatal; if we're dying because of a panic() we call + * debugger_trap_immediate(). + */ + +#ifndef __X86_DEBUGGER_H__ +#define __X86_DEBUGGER_H__ + +#include +#include +#include + +void domain_pause_for_debugger(void); + +#ifdef CONFIG_CRASH_DEBUG + +#include + +static inline bool debugger_trap_fatal( + unsigned int vector, struct cpu_user_regs *regs) +{ + int rc = __trap_to_gdb(regs, vector); + return ((rc == 0) || (vector == TRAP_int3)); +} + +/* Int3 is a trivial way to gather cpu_user_regs context. */ +#define debugger_trap_immediate() __asm__ __volatile__ ( "int3" ); + +static inline bool debugger_trap_entry( + unsigned int vector, struct cpu_user_regs *regs) +{ + /* + * This function is called before any checks are made. Amongst other + * things, be aware that during early boot, current is not a safe pointer + * to follow. + */ + struct vcpu *v = current; + + if ( vector != TRAP_int3 && vector != TRAP_debug ) + return false; + + if ( guest_mode(regs) && guest_kernel_mode(v, regs) && + v->domain->debugger_attached ) + { + if ( vector != TRAP_debug ) /* domain pause is good enough */ + current->arch.gdbsx_vcpu_event = vector; + domain_pause_for_debugger(); + return true; + } + + return false; +} + +#else + +static inline bool debugger_trap_fatal( + unsigned int vector, struct cpu_user_regs *regs) +{ + return false; +} + +#define debugger_trap_immediate() ((void)0) + +static inline bool debugger_trap_entry( + unsigned int vector, struct cpu_user_regs *regs) +{ + return false; +} + +#endif + +#ifdef CONFIG_GDBSX +unsigned int dbg_rw_mem(unsigned long gva, XEN_GUEST_HANDLE_PARAM(void) buf, + unsigned int len, domid_t domid, bool toaddr, + uint64_t pgd3); +#endif + +#endif /* __X86_DEBUGGER_H__ */ diff --git a/xen/arch/x86/include/asm/debugreg.h b/xen/arch/x86/include/asm/debugreg.h new file mode 100644 index 0000000000..c57914efc6 --- /dev/null +++ b/xen/arch/x86/include/asm/debugreg.h @@ -0,0 +1,83 @@ +#ifndef _X86_DEBUGREG_H +#define _X86_DEBUGREG_H + + +/* Indicate the register numbers for a number of the specific + debug registers. Registers 0-3 contain the addresses we wish to trap on */ + +#define DR_FIRSTADDR 0 +#define DR_LASTADDR 3 +#define DR_STATUS 6 +#define DR_CONTROL 7 + +/* Define a few things for the status register. We can use this to determine + which debugging register was responsible for the trap. The other bits + are either reserved or not of interest to us. 
*/
+
+#define DR_TRAP0        (0x1)           /* db0 */
+#define DR_TRAP1        (0x2)           /* db1 */
+#define DR_TRAP2        (0x4)           /* db2 */
+#define DR_TRAP3        (0x8)           /* db3 */
+#define DR_STEP         (0x4000)        /* single-step */
+#define DR_SWITCH       (0x8000)        /* task switch */
+#define DR_NOT_RTM      (0x10000)       /* clear: #BP inside RTM region */
+#define DR_STATUS_RESERVED_ZERO (~0xffffeffful) /* Reserved, read as zero */
+#define DR_STATUS_RESERVED_ONE  0xffff0ff0ul /* Reserved, read as one */
+
+/* Now define a bunch of things for manipulating the control register.
+   The top two bytes of the control register consist of 4 fields of 4
+   bits - each field corresponds to one of the four debug registers,
+   and indicates what types of access we trap on, and how large the data
+   field is that we are looking at */
+
+#define DR_CONTROL_SHIFT 16   /* Skip this many bits in ctl register */
+#define DR_CONTROL_SIZE  4    /* 4 control bits per register */
+
+#define DR_RW_EXECUTE (0x0)   /* Settings for the access types to trap on */
+#define DR_RW_WRITE   (0x1)
+#define DR_IO         (0x2)
+#define DR_RW_READ    (0x3)
+
+#define DR_LEN_1      (0x0)   /* Settings for data length to trap on */
+#define DR_LEN_2      (0x4)
+#define DR_LEN_4      (0xC)
+#define DR_LEN_8      (0x8)
+
+/* The low byte of the control register determines which registers are
+   enabled.  There are 4 fields of two bits.  One bit is "local", meaning
+   that the processor will reset the bit after a task switch, and the other
+   is "global", meaning that we have to explicitly reset the bit. */
+
+#define DR_LOCAL_ENABLE_SHIFT  0   /* Extra shift to the local enable bit */
+#define DR_GLOBAL_ENABLE_SHIFT 1   /* Extra shift to the global enable bit */
+#define DR_ENABLE_SIZE         2   /* 2 enable bits per register */
+
+#define DR_LOCAL_ENABLE_MASK  (0x55)  /* Set local bits for all 4 regs */
+#define DR_GLOBAL_ENABLE_MASK (0xAA)  /* Set global bits for all 4 regs */
+
+#define DR7_ACTIVE_MASK (DR_LOCAL_ENABLE_MASK|DR_GLOBAL_ENABLE_MASK)
+
+/* The second byte of the control register has a few special things.
+   We can slow the instruction pipeline for instructions coming via the
+   gdt or the ldt if we want to.
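
Putting the control-register fields together, a sketch of programming a hypothetical 4-byte write watchpoint in DR0 (using the write_debugreg() helper defined just below; the address is illustrative):

    unsigned long addr = 0xffff82d040123456UL;  /* hypothetical target */
    unsigned long dr7 = 0;

    write_debugreg(0, addr);                 /* DR0: linear address to watch */

    /* Type/length nibble for register 0 sits at DR_CONTROL_SHIFT (bit 16). */
    dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
           << (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);

    /* Global enable for register 0 (bit 1 of the low byte). */
    dr7 |= 1UL << (0 * DR_ENABLE_SIZE + DR_GLOBAL_ENABLE_SHIFT);

    write_debugreg(7, dr7);
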
I am not sure why this is an advantage */ + +#define DR_CONTROL_RESERVED_ZERO (~0xffff27fful) /* Reserved, read as zero */ +#define DR_CONTROL_RESERVED_ONE (0x00000400ul) /* Reserved, read as one */ +#define DR_LOCAL_EXACT_ENABLE (0x00000100ul) /* Local exact enable */ +#define DR_GLOBAL_EXACT_ENABLE (0x00000200ul) /* Global exact enable */ +#define DR_RTM_ENABLE (0x00000800ul) /* RTM debugging enable */ +#define DR_GENERAL_DETECT (0x00002000ul) /* General detect enable */ + +#define write_debugreg(reg, val) do { \ + unsigned long __val = val; \ + asm volatile ( "mov %0,%%db" #reg : : "r" (__val) ); \ +} while (0) +#define read_debugreg(reg) ({ \ + unsigned long __val; \ + asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) ); \ + __val; \ +}) +long set_debugreg(struct vcpu *, unsigned int reg, unsigned long value); +void activate_debugregs(const struct vcpu *); + +#endif /* _X86_DEBUGREG_H */ diff --git a/xen/arch/x86/include/asm/delay.h b/xen/arch/x86/include/asm/delay.h new file mode 100644 index 0000000000..9be2f46590 --- /dev/null +++ b/xen/arch/x86/include/asm/delay.h @@ -0,0 +1,13 @@ +#ifndef _X86_DELAY_H +#define _X86_DELAY_H + +/* + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines calling functions in arch/i386/lib/delay.c + */ + +extern void __udelay(unsigned long usecs); +#define udelay(n) __udelay(n) + +#endif /* defined(_X86_DELAY_H) */ diff --git a/xen/arch/x86/include/asm/desc.h b/xen/arch/x86/include/asm/desc.h new file mode 100644 index 0000000000..225a864c48 --- /dev/null +++ b/xen/arch/x86/include/asm/desc.h @@ -0,0 +1,252 @@ +#ifndef __ARCH_DESC_H +#define __ARCH_DESC_H + +#include + +/* + * Xen reserves a memory page of GDT entries. + * No guest GDT entries exist beyond the Xen reserved area. + */ +#define NR_RESERVED_GDT_PAGES 1 +#define NR_RESERVED_GDT_BYTES (NR_RESERVED_GDT_PAGES * PAGE_SIZE) +#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8) + +#define LAST_RESERVED_GDT_PAGE \ + (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1) +#define LAST_RESERVED_GDT_BYTE \ + (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1) +#define LAST_RESERVED_GDT_ENTRY \ + (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1) + +#define LDT_ENTRY_SIZE 8 + +#define FLAT_COMPAT_RING1_CS 0xe019 /* GDT index 259 */ +#define FLAT_COMPAT_RING1_DS 0xe021 /* GDT index 260 */ +#define FLAT_COMPAT_RING1_SS 0xe021 /* GDT index 260 */ +#define FLAT_COMPAT_RING3_CS 0xe02b /* GDT index 261 */ +#define FLAT_COMPAT_RING3_DS 0xe033 /* GDT index 262 */ +#define FLAT_COMPAT_RING3_SS 0xe033 /* GDT index 262 */ + +#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS +#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS +#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS +#define FLAT_COMPAT_USER_DS FLAT_COMPAT_RING3_DS +#define FLAT_COMPAT_USER_CS FLAT_COMPAT_RING3_CS +#define FLAT_COMPAT_USER_SS FLAT_COMPAT_RING3_SS + +#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8) +#define LDT_ENTRY (TSS_ENTRY + 2) +#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2) + +#define TSS_SELECTOR (TSS_ENTRY << 3) +#define LDT_SELECTOR (LDT_ENTRY << 3) +#define PER_CPU_SELECTOR (PER_CPU_GDT_ENTRY << 3) + +#ifndef __ASSEMBLY__ + +#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3) + +/* Fix up the RPL of a guest segment selector. */ +#define __fixup_guest_selector(d, sel) \ +({ \ + uint16_t _rpl = GUEST_KERNEL_RPL(d); \ + (sel) = (((sel) & 3) >= _rpl) ? 
(sel) : (((sel) & ~3) | _rpl); \
+})
+
+#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
+#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)
+
+/*
+ * We need this check because enforcing the correct guest kernel RPL is
+ * insufficient if the selector is poked into an interrupt, trap or call gate.
+ * The selector RPL is ignored when a gate is accessed. We must therefore make
+ * sure that the selector does not reference a Xen-private segment.
+ *
+ * Note that selectors used only by IRET do not need to be checked. If the
+ * descriptor DPL differs from CS RPL then we'll #GP.
+ *
+ * Stack and data selectors do not need to be checked. If DS, ES, FS, GS are
+ * DPL < CPL then they'll be cleared automatically. If SS RPL or DPL differs
+ * from CS RPL then we'll #GP.
+ */
+#define guest_gate_selector_okay(d, sel) \
+    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */ \
+     ((sel) == (!is_pv_32bit_domain(d) ? \
+                FLAT_KERNEL_CS : /* Xen default seg? */ \
+                FLAT_COMPAT_KERNEL_CS)) || \
+     ((sel) & 4)) /* LDT seg? */
+
+#endif /* __ASSEMBLY__ */
+
+/* These are bitmasks for the high 32 bits of a descriptor table entry. */
+#define _SEGMENT_TYPE (15<< 8)
+#define _SEGMENT_WR ( 1<< 9) /* Writeable (data) or Readable (code)
+                                segment */
+#define _SEGMENT_EC ( 1<<10) /* Expand-down or Conforming segment */
+#define _SEGMENT_CODE ( 1<<11) /* Code (vs data) segment for non-system
+                                  segments */
+#define _SEGMENT_S ( 1<<12) /* System descriptor (yes iff S==0) */
+#define _SEGMENT_DPL ( 3<<13) /* Descriptor Privilege Level */
+#define _SEGMENT_P ( 1<<15) /* Segment Present */
+#define _SEGMENT_L ( 1<<21) /* 64-bit segment */
+#define _SEGMENT_DB ( 1<<22) /* 16- or 32-bit segment */
+#define _SEGMENT_G ( 1<<23) /* Granularity */
+
+#ifndef __ASSEMBLY__
+
+/* System Descriptor types for GDT and IDT entries. */
+#define SYS_DESC_tss16_avail 1
+#define SYS_DESC_ldt 2
+#define SYS_DESC_tss16_busy 3
+#define SYS_DESC_call_gate16 4
+#define SYS_DESC_task_gate 5
+#define SYS_DESC_irq_gate16 6
+#define SYS_DESC_trap_gate16 7
+#define SYS_DESC_tss_avail 9
+#define SYS_DESC_tss_busy 11
+#define SYS_DESC_call_gate 12
+#define SYS_DESC_irq_gate 14
+#define SYS_DESC_trap_gate 15
+
+typedef union {
+    uint64_t raw;
+    struct {
+        uint32_t a, b;
+    };
+} seg_desc_t;
+
+typedef union {
+    struct {
+        uint64_t a, b;
+    };
+    struct {
+        uint16_t addr0;
+        uint16_t cs;
+        uint8_t ist; /* :3, 5 bits rsvd, but this yields far better code. */
+        uint8_t type:4, s:1, dpl:2, p:1;
+        uint16_t addr1;
+        uint32_t addr2;
+        /* 32 bits rsvd. */
+    };
+} idt_entry_t;
+
+/* Write the lower 64 bits of an IDT Entry. This relies on the upper 32
+ * bits of the address not changing, which is a safe assumption as all
+ * functions we are likely to load will live inside the 1GB
+ * code/data/bss address range.
+ *
+ * Ideally, we would use cmpxchg16b, but this is not supported on some
+ * old AMD 64bit capable processors, and has no safe equivalent.
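+ *
+ * Illustrative usage sketch (idt, vector and new_handler are hypothetical
+ * names, not defined in this header): repointing a live gate at a new
+ * handler goes through _update_gate_addr_lower() below, e.g.
+ *
+ *     _update_gate_addr_lower(&idt[vector], new_handler);
+ *
+ * which rewrites only the low quadword, so another CPU observes either
+ * the old or the new descriptor, never a torn mix of the two.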
+ */
+static inline void _write_gate_lower(volatile idt_entry_t *gate,
+                                     const idt_entry_t *new)
+{
+    ASSERT(gate->b == new->b);
+    gate->a = new->a;
+}
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+do { \
+    (gate_addr)->a = 0; \
+    smp_wmb(); /* disable gate /then/ rewrite */ \
+    (gate_addr)->b = \
+        ((unsigned long)(addr) >> 32); \
+    smp_wmb(); /* rewrite /then/ enable gate */ \
+    (gate_addr)->a = \
+        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
+        ((unsigned long)(dpl) << 45) | \
+        ((unsigned long)(type) << 40) | \
+        ((unsigned long)(addr) & 0xFFFFUL) | \
+        ((unsigned long)__HYPERVISOR_CS << 16) | \
+        (1UL << 47); \
+} while (0)
+
+static inline void _set_gate_lower(idt_entry_t *gate, unsigned long type,
+                                   unsigned long dpl, void *addr)
+{
+    idt_entry_t idte;
+    idte.b = gate->b;
+    idte.a =
+        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
+        ((unsigned long)(dpl) << 45) |
+        ((unsigned long)(type) << 40) |
+        ((unsigned long)(addr) & 0xFFFFUL) |
+        ((unsigned long)__HYPERVISOR_CS << 16) |
+        (1UL << 47);
+    _write_gate_lower(gate, &idte);
+}
+
+/* Update the lower half handler of an IDT Entry, without changing any
+ * other configuration. */
+static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
+{
+    idt_entry_t idte;
+    idte.a = gate->a;
+
+    idte.b = ((unsigned long)(addr) >> 32);
+    idte.a &= 0x0000FFFFFFFF0000ULL;
+    idte.a |= (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
+              ((unsigned long)(addr) & 0xFFFFUL);
+
+    _write_gate_lower(gate, &idte);
+}
+
+#define _set_tssldt_desc(desc,addr,limit,type) \
+do { \
+    (desc)[0].b = (desc)[1].b = 0; \
+    smp_wmb(); /* disable entry /then/ rewrite */ \
+    (desc)[0].a = \
+        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF); \
+    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \
+    smp_wmb(); /* rewrite /then/ enable entry */ \
+    (desc)[0].b = \
+        ((u32)(addr) & 0xFF000000U) | \
+        ((u32)(type) << 8) | 0x8000U | \
+        (((u32)(addr) & 0x00FF0000U) >> 16); \
+} while (0)
+
+struct __packed desc_ptr {
+    unsigned short limit;
+    unsigned long base;
+};
+
+extern seg_desc_t boot_gdt[];
+DECLARE_PER_CPU(seg_desc_t *, gdt);
+DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
+extern seg_desc_t boot_compat_gdt[];
+DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
+DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
+DECLARE_PER_CPU(bool, full_gdt_loaded);
+
+static inline void lgdt(const struct desc_ptr *gdtr)
+{
+    __asm__ __volatile__ ( "lgdt %0" :: "m" (*gdtr) : "memory" );
+}
+
+static inline void lidt(const struct desc_ptr *idtr)
+{
+    __asm__ __volatile__ ( "lidt %0" :: "m" (*idtr) : "memory" );
+}
+
+static inline void lldt(unsigned int sel)
+{
+    __asm__ __volatile__ ( "lldt %w0" :: "rm" (sel) : "memory" );
+}
+
+static inline void ltr(unsigned int sel)
+{
+    __asm__ __volatile__ ( "ltr %w0" :: "rm" (sel) : "memory" );
+}
+
+static inline unsigned int str(void)
+{
+    unsigned int sel;
+
+    __asm__ ( "str %0" : "=r" (sel) );
+
+    return sel;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ARCH_DESC_H */
diff --git a/xen/arch/x86/include/asm/device.h b/xen/arch/x86/include/asm/device.h
new file mode 100644
index 0000000000..f2acc7effd
--- /dev/null
+++ b/xen/arch/x86/include/asm/device.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_X86_DEVICE_H
+#define __ASM_X86_DEVICE_H
+
+#include
+
+/*
+ * x86 only supports PCI. Therefore it's possible to directly use
+ * pci_dev to avoid adding a new field.
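+ *
+ * As an illustrative sketch, arch-neutral callers can stay generic:
+ *
+ *     device_t *dev = pci_to_dev(pdev);
+ *     if ( dev_is_pci(dev) )
+ *         ...;
+ *
+ * and on x86 both helpers collapse to (almost) nothing at compile time.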
+ */ + +typedef struct pci_dev device_t; + +#define dev_is_pci(dev) ((void)(dev), 1) +#define pci_to_dev(pci) (pci) + +#endif /* __ASM_X86_DEVICE_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/div64.h b/xen/arch/x86/include/asm/div64.h new file mode 100644 index 0000000000..dd49f64a3b --- /dev/null +++ b/xen/arch/x86/include/asm/div64.h @@ -0,0 +1,14 @@ +#ifndef __X86_DIV64 +#define __X86_DIV64 + +#include + +#define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ +}) + +#endif diff --git a/xen/arch/x86/include/asm/dom0_build.h b/xen/arch/x86/include/asm/dom0_build.h new file mode 100644 index 0000000000..a5f8c9e67f --- /dev/null +++ b/xen/arch/x86/include/asm/dom0_build.h @@ -0,0 +1,42 @@ +#ifndef _DOM0_BUILD_H_ +#define _DOM0_BUILD_H_ + +#include +#include + +#include + +extern unsigned int dom0_memflags; + +unsigned long dom0_compute_nr_pages(struct domain *d, + struct elf_dom_parms *parms, + unsigned long initrd_len); +int dom0_setup_permissions(struct domain *d); + +int dom0_construct_pv(struct domain *d, const module_t *image, + unsigned long image_headroom, + module_t *initrd, + char *cmdline); + +int dom0_construct_pvh(struct domain *d, const module_t *image, + unsigned long image_headroom, + module_t *initrd, + char *cmdline); + +unsigned long dom0_paging_pages(const struct domain *d, + unsigned long nr_pages); + +void dom0_update_physmap(bool compat, unsigned long pfn, + unsigned long mfn, unsigned long vphysmap_s); + +#endif /* _DOM0_BUILD_H_ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h new file mode 100644 index 0000000000..e62e109598 --- /dev/null +++ b/xen/arch/x86/include/asm/domain.h @@ -0,0 +1,769 @@ +#ifndef __ASM_DOMAIN_H__ +#define __ASM_DOMAIN_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo) + +#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ + (d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector) +#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain)) +#define is_domain_direct_mapped(d) ((void)(d), 0) + +#define VCPU_TRAP_NONE 0 +#define VCPU_TRAP_NMI 1 +#define VCPU_TRAP_MCE 2 +#define VCPU_TRAP_LAST VCPU_TRAP_MCE + +#define nmi_state async_exception_state(VCPU_TRAP_NMI) +#define mce_state async_exception_state(VCPU_TRAP_MCE) + +#define nmi_pending nmi_state.pending +#define mce_pending mce_state.pending + +struct trap_bounce { + uint32_t error_code; + uint8_t flags; /* TBF_ */ + uint16_t cs; + unsigned long eip; +}; + +#define MAPHASH_ENTRIES 8 +#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1)) +#define MAPHASHENT_NOTINUSE ((u32)~0U) +struct mapcache_vcpu { + /* Shadow of mapcache_domain.epoch. */ + unsigned int shadow_epoch; + + /* Lock-free per-VCPU hash of recently-used mappings. */ + struct vcpu_maphash_entry { + unsigned long mfn; + uint32_t idx; + uint32_t refcnt; + } hash[MAPHASH_ENTRIES]; +}; + +struct mapcache_domain { + /* The number of array entries, and a cursor into the array. */ + unsigned int entries; + unsigned int cursor; + + /* Protects map_domain_page(). 
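+       Callers never take this lock directly; the usual (purely
+       illustrative) pattern is just:
+           void *p = map_domain_page(mfn);
+           ... use p ...
+           unmap_domain_page(p);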
*/
+    spinlock_t lock;
+
+    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
+    unsigned int epoch;
+    u32 tlbflush_timestamp;
+
+    /* Which mappings are in use, and which are garbage to reap next epoch? */
+    unsigned long *inuse;
+    unsigned long *garbage;
+};
+
+int mapcache_domain_init(struct domain *);
+int mapcache_vcpu_init(struct vcpu *);
+void mapcache_override_current(struct vcpu *);
+
+/* x86/64: toggle guest between kernel and user modes. */
+void toggle_guest_mode(struct vcpu *);
+/* x86/64: toggle guest page tables between kernel and user modes. */
+void toggle_guest_pt(struct vcpu *);
+
+void cpuid_policy_updated(struct vcpu *v);
+
+/*
+ * Initialise a hypercall-transfer page. The given pointer must be mapped
+ * in Xen virtual address space (accesses are not validated or checked).
+ */
+void init_hypercall_page(struct domain *d, void *);
+
+/************************************************/
+/*          shadow paging extension             */
+/************************************************/
+struct shadow_domain {
+#ifdef CONFIG_SHADOW_PAGING
+    unsigned int opt_flags; /* runtime tunable optimizations on/off */
+    struct page_list_head pinned_shadows;
+
+    /* Memory allocation */
+    struct page_list_head freelist;
+    unsigned int total_pages; /* number of pages allocated */
+    unsigned int free_pages; /* number of pages on freelists */
+    unsigned int p2m_pages; /* number of pages allocated to p2m */
+
+    /* 1-to-1 map for use when HVM vcpus have paging disabled */
+    pagetable_t unpaged_pagetable;
+
+    /* Reflects guest table dirty status; incremented by write
+     * emulation and by removal of write permissions. */
+    atomic_t gtable_dirty_version;
+
+    /* Shadow hashtable */
+    struct page_info **hash_table;
+    bool_t hash_walking; /* Some function is walking the hash table */
+
+    /* Fast MMIO path heuristic */
+    bool has_fast_mmio_entries;
+
+    /* OOS */
+    bool_t oos_active;
+
+#ifdef CONFIG_HVM
+    /* Has this domain ever used HVMOP_pagetable_dying? */
+    bool_t pagetable_dying_op;
+#endif
+
+#ifdef CONFIG_PV
+    /* PV L1 Terminal Fault mitigation. */
+    struct tasklet pv_l1tf_tasklet;
+#endif /* CONFIG_PV */
+#endif
+};
+
+struct shadow_vcpu {
+#ifdef CONFIG_SHADOW_PAGING
+#ifdef CONFIG_HVM
+    /* PAE guests: per-vcpu shadow top-level table */
+    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
+    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
+    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
+
+    /* shadow(s) of guest (MFN) */
+    pagetable_t shadow_table[4];
+#else
+    /* shadow of guest (MFN) */
+    pagetable_t shadow_table[1];
+#endif
+
+    /* Last MFN that we emulated a write to, as part of the unshadow
+     * heuristics. */
+    unsigned long last_emulated_mfn_for_unshadow;
+    /* MFN of the last shadow that we shot a writeable mapping in */
+    unsigned long last_writeable_pte_smfn;
+#ifdef CONFIG_HVM
+    /* Last frame number that we emulated a write to.
*/
+    unsigned long last_emulated_frame;
+    /* Last MFN that we successfully emulated a write to */
+    unsigned long last_emulated_mfn;
+#endif
+
+    /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
+    mfn_t oos[SHADOW_OOS_PAGES];
+    mfn_t oos_snapshot[SHADOW_OOS_PAGES];
+    struct oos_fixup {
+        int next;
+        mfn_t smfn[SHADOW_OOS_FIXUPS];
+        unsigned long off[SHADOW_OOS_FIXUPS];
+    } oos_fixup[SHADOW_OOS_PAGES];
+
+#ifdef CONFIG_HVM
+    bool_t pagetable_dying;
+#endif
+#endif
+};
+
+/************************************************/
+/*            hardware assisted paging          */
+/************************************************/
+struct hap_domain {
+    struct page_list_head freelist;
+    unsigned int total_pages; /* number of pages allocated */
+    unsigned int free_pages; /* number of pages on freelists */
+    unsigned int p2m_pages; /* number of pages allocated to p2m */
+};
+
+/************************************************/
+/*       common paging data structure           */
+/************************************************/
+struct log_dirty_domain {
+    /* log-dirty radix tree to record dirty pages */
+    mfn_t top;
+    unsigned int allocs;
+    unsigned int failed_allocs;
+
+    /* log-dirty mode stats */
+    unsigned long fault_count;
+    unsigned long dirty_count;
+
+    /* functions which are paging mode specific */
+    const struct log_dirty_ops {
+        int (*enable )(struct domain *d, bool log_global);
+        int (*disable )(struct domain *d);
+        void (*clean )(struct domain *d);
+    } *ops;
+};
+
+struct paging_domain {
+    /* paging lock */
+    mm_lock_t lock;
+
+    /* flags to control paging operation */
+    u32 mode;
+    /* Has that pool ever run out of memory? */
+    bool_t p2m_alloc_failed;
+    /* extension for shadow paging support */
+    struct shadow_domain shadow;
+    /* extension for hardware-assisted paging */
+    struct hap_domain hap;
+    /* log dirty support */
+    struct log_dirty_domain log_dirty;
+
+    /* preemption handling */
+    struct {
+        const struct domain *dom;
+        unsigned int op;
+        union {
+            struct {
+                unsigned long done:PADDR_BITS - PAGE_SHIFT;
+                unsigned long i4:PAGETABLE_ORDER;
+                unsigned long i3:PAGETABLE_ORDER;
+            } log_dirty;
+        };
+    } preempt;
+
+    /* alloc/free pages from the pool for paging-assistance structures
+     * (used by p2m and log-dirty code for their tries) */
+    struct page_info * (*alloc_page)(struct domain *d);
+    void (*free_page)(struct domain *d, struct page_info *pg);
+};
+
+struct paging_vcpu {
+    /* Pointers to mode-specific entry points. */
+    const struct paging_mode *mode;
+    /* Nested Virtualization: paging mode of nested guest */
+    const struct paging_mode *nestedmode;
+#ifdef CONFIG_HVM
+    /* HVM guest: last emulate was to a pagetable */
+    unsigned int last_write_was_pt:1;
+    /* HVM guest: last write emulation succeeded */
+    unsigned int last_write_emul_ok:1;
+#endif
+    /* Translated guest: virtual TLB */
+    struct shadow_vtlb *vtlb;
+    spinlock_t vtlb_lock;
+
+    /* paging support extension */
+    struct shadow_vcpu shadow;
+};
+
+#define MAX_NESTEDP2M 10
+
+#define MAX_ALTP2M 10 /* arbitrary */
+#define INVALID_ALTP2M 0xffff
+#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t))
+struct p2m_domain;
+struct time_scale {
+    int shift;
+    u32 mul_frac;
+};
+
+struct pv_domain
+{
+    l1_pgentry_t **gdt_ldt_l1tab;
+
+    atomic_t nr_l4_pages;
+
+    /* Is a 32-bit PV guest? */
+    bool is_32bit;
+    /* XPTI active? */
+    bool xpti;
+    /* Use PCID feature? */
+    bool pcid;
+    /* Mitigate L1TF with shadow/crashing? */
+    bool check_l1tf;
+
+    /* map_domain_page() mapping cache.
*/
+    struct mapcache_domain mapcache;
+
+    struct cpuidmasks *cpuidmasks;
+};
+
+struct monitor_write_data {
+    struct {
+        unsigned int msr : 1;
+        unsigned int cr0 : 1;
+        unsigned int cr3 : 1;
+        unsigned int cr4 : 1;
+    } do_write;
+
+    bool cr3_noflush;
+
+    uint32_t msr;
+    uint64_t value;
+    uint64_t cr0;
+    uint64_t cr3;
+    uint64_t cr4;
+};
+
+struct arch_domain
+{
+    struct page_info *perdomain_l3_pg;
+
+#ifdef CONFIG_PV32
+    unsigned int hv_compat_vstart;
+#endif
+
+    /* Maximum physical-address bitwidth supported by this guest. */
+    unsigned int physaddr_bitsize;
+
+    /* I/O-port admin-specified access capabilities. */
+    struct rangeset *ioport_caps;
+    uint32_t pci_cf8;
+    uint8_t cmos_idx;
+
+    union {
+        struct pv_domain pv;
+        struct hvm_domain hvm;
+    };
+
+    struct paging_domain paging;
+    struct p2m_domain *p2m;
+    /* To enforce lock ordering in the PoD code wrt the
+     * page_alloc lock */
+    int page_alloc_unlock_level;
+
+    /* Continuable domain_relinquish_resources(). */
+    unsigned int rel_priv;
+    struct page_list_head relmem_list;
+
+    const struct arch_csw {
+        void (*from)(struct vcpu *);
+        void (*to)(struct vcpu *);
+        void noreturn (*tail)(void);
+    } *ctxt_switch;
+
+#ifdef CONFIG_HVM
+    /* nestedhvm: translate l2 guest physical to host physical */
+    struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
+    mm_lock_t nested_p2m_lock;
+
+    /* altp2m: allow multiple copies of host p2m */
+    bool_t altp2m_active;
+    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+    mm_lock_t altp2m_list_lock;
+    uint64_t *altp2m_eptp;
+    uint64_t *altp2m_visible_eptp;
+#endif
+
+    /* NB. protected by d->event_lock and by irq_desc[irq].lock */
+    struct radix_tree_root irq_pirq;
+
+    /* Is shared-info page in 32-bit format? */
+    bool_t has_32bit_shinfo;
+
+    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
+    bool_t auto_unmask;
+
+    /*
+     * The width of the FIP/FDP register in the FPU that needs to be
+     * saved/restored during a context switch. This is needed because
+     * the FPU can either: a) restore the 64-bit FIP/FDP and clear FCS
+     * and FDS; or b) restore the 32-bit FIP/FDP (clearing the upper
+     * 32-bits of FIP/FDP) and restore FCS/FDS.
+     *
+     * Which one is needed depends on the guest.
+     *
+     * This can be either: 8, 4 or 0. 0 means auto-detect the size
+     * based on the width of FIP/FDP values that are written by the
+     * guest.
+     */
+    uint8_t x87_fip_width;
+
+    /* CPUID and MSR policy objects. */
+    struct cpuid_policy *cpuid;
+    struct msr_policy *msr;
+
+    struct PITState vpit;
+
+    /* TSC management (emulation, pv, scaling, stats) */
+    int tsc_mode; /* see asm/time.h */
+    bool_t vtsc; /* tsc is emulated (may change after migrate) */
+    s_time_t vtsc_last; /* previous TSC value (guarantee monotonicity) */
+    uint64_t vtsc_offset; /* adjustment for save/restore/migrate */
+    uint32_t tsc_khz; /* cached guest khz for certain emulated or
+                         hardware TSC scaling cases */
+    struct time_scale vtsc_to_ns; /* scaling for certain emulated or
+                                     hardware TSC scaling cases */
+    struct time_scale ns_to_vtsc; /* scaling for certain emulated or
+                                     hardware TSC scaling cases */
+    uint32_t incarnation; /* incremented every restore or live migrate
+                             (possibly other cases in the future) */
+
+    /* Pseudophysical e820 map (XENMEM_memory_map). */
+    spinlock_t e820_lock;
+    struct e820entry *e820;
+    unsigned int nr_e820;
+
+    /* RMID assigned to the domain for CMT */
+    unsigned int psr_rmid;
+    /* COS assigned to the domain for each socket */
+    unsigned int *psr_cos_ids;
+
+    /* Shared page for notifying that explicit PIRQ EOI is required.
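+       A hedged, guest-side pseudo-code sketch of its use: a guest that
+       has registered this page may skip the EOI hypercall when its bit
+       is clear, e.g. only issue PHYSDEVOP_eoi when
+       test_bit(pirq, pirq_eoi_map) is set.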
*/ + unsigned long *pirq_eoi_map; + unsigned long pirq_eoi_map_mfn; + + /* Arch-specific monitor options */ + struct { + unsigned int write_ctrlreg_enabled : 4; + unsigned int write_ctrlreg_sync : 4; + unsigned int write_ctrlreg_onchangeonly : 4; + unsigned int singlestep_enabled : 1; + unsigned int software_breakpoint_enabled : 1; + unsigned int debug_exception_enabled : 1; + unsigned int debug_exception_sync : 1; + unsigned int cpuid_enabled : 1; + unsigned int descriptor_access_enabled : 1; + unsigned int guest_request_userspace_enabled : 1; + unsigned int emul_unimplemented_enabled : 1; + /* + * By default all events are sent. + * This is used to filter out pagefaults. + */ + unsigned int inguest_pagefault_disabled : 1; + unsigned int control_register_values : 1; + struct monitor_msr_bitmap *msr_bitmap; + uint64_t write_ctrlreg_mask[4]; + } monitor; + + /* Mem_access emulation control */ + bool_t mem_access_emulate_each_rep; + + /* Don't unconditionally inject #GP for unhandled MSRs. */ + bool msr_relaxed; + + /* Emulated devices enabled bitmap. */ + uint32_t emulation_flags; +} __cacheline_aligned; + +#ifdef CONFIG_HVM +#define X86_EMU_LAPIC XEN_X86_EMU_LAPIC +#define X86_EMU_HPET XEN_X86_EMU_HPET +#define X86_EMU_PM XEN_X86_EMU_PM +#define X86_EMU_RTC XEN_X86_EMU_RTC +#define X86_EMU_IOAPIC XEN_X86_EMU_IOAPIC +#define X86_EMU_PIC XEN_X86_EMU_PIC +#define X86_EMU_VGA XEN_X86_EMU_VGA +#define X86_EMU_IOMMU XEN_X86_EMU_IOMMU +#define X86_EMU_USE_PIRQ XEN_X86_EMU_USE_PIRQ +#define X86_EMU_VPCI XEN_X86_EMU_VPCI +#else +#define X86_EMU_LAPIC 0 +#define X86_EMU_HPET 0 +#define X86_EMU_PM 0 +#define X86_EMU_RTC 0 +#define X86_EMU_IOAPIC 0 +#define X86_EMU_PIC 0 +#define X86_EMU_VGA 0 +#define X86_EMU_IOMMU 0 +#define X86_EMU_USE_PIRQ 0 +#define X86_EMU_VPCI 0 +#endif + +#define X86_EMU_PIT XEN_X86_EMU_PIT + +/* This must match XEN_X86_EMU_ALL in xen.h */ +#define X86_EMU_ALL (X86_EMU_LAPIC | X86_EMU_HPET | \ + X86_EMU_PM | X86_EMU_RTC | \ + X86_EMU_IOAPIC | X86_EMU_PIC | \ + X86_EMU_VGA | X86_EMU_IOMMU | \ + X86_EMU_PIT | X86_EMU_USE_PIRQ | \ + X86_EMU_VPCI) + +#define has_vlapic(d) (!!((d)->arch.emulation_flags & X86_EMU_LAPIC)) +#define has_vhpet(d) (!!((d)->arch.emulation_flags & X86_EMU_HPET)) +#define has_vpm(d) (!!((d)->arch.emulation_flags & X86_EMU_PM)) +#define has_vrtc(d) (!!((d)->arch.emulation_flags & X86_EMU_RTC)) +#define has_vioapic(d) (!!((d)->arch.emulation_flags & X86_EMU_IOAPIC)) +#define has_vpic(d) (!!((d)->arch.emulation_flags & X86_EMU_PIC)) +#define has_vvga(d) (!!((d)->arch.emulation_flags & X86_EMU_VGA)) +#define has_viommu(d) (!!((d)->arch.emulation_flags & X86_EMU_IOMMU)) +#define has_vpit(d) (!!((d)->arch.emulation_flags & X86_EMU_PIT)) +#define has_pirq(d) (!!((d)->arch.emulation_flags & X86_EMU_USE_PIRQ)) +#define has_vpci(d) (!!((d)->arch.emulation_flags & X86_EMU_VPCI)) + +#define gdt_ldt_pt_idx(v) \ + ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT)) +#define pv_gdt_ptes(v) \ + ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \ + (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))) +#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16) + +struct pv_vcpu +{ + /* map_domain_page() mapping cache. 
*/ + struct mapcache_vcpu mapcache; + + unsigned int vgc_flags; + + struct trap_info *trap_ctxt; + + unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE]; + unsigned long ldt_base; + unsigned int gdt_ents, ldt_ents; + + unsigned long kernel_ss, kernel_sp; + unsigned long ctrlreg[8]; + + unsigned long event_callback_eip; + unsigned long failsafe_callback_eip; + union { + unsigned long syscall_callback_eip; + struct { + unsigned int event_callback_cs; + unsigned int failsafe_callback_cs; + }; + }; + + unsigned long syscall32_callback_eip; + unsigned long sysenter_callback_eip; + unsigned short syscall32_callback_cs; + unsigned short sysenter_callback_cs; + bool_t syscall32_disables_events; + bool_t sysenter_disables_events; + + /* + * 64bit segment bases. + * + * FS and the active GS are always stale when the vCPU is in context, as + * the guest can change them behind Xen's back with MOV SREG, or + * WR{FS,GS}BASE on capable hardware. + * + * The inactive GS base is never stale, as guests can't use SWAPGS to + * access it - all modification is performed by Xen either directly + * (hypercall, #GP emulation), or indirectly (toggle_guest_mode()). + * + * The vCPU context switch path is optimised based on this fact, so any + * path updating or swapping the inactive base must update the cached + * value as well. + * + * Which GS base is active and inactive depends on whether the vCPU is in + * user or kernel context. + */ + unsigned long fs_base; + unsigned long gs_base_kernel; + unsigned long gs_base_user; + + /* Bounce information for propagating an exception to guest OS. */ + struct trap_bounce trap_bounce; + + /* I/O-port access bitmap. */ + XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */ + unsigned int iobmp_limit; /* Number of ports represented in the bitmap. */ +#define IOPL(val) MASK_INSR(val, X86_EFLAGS_IOPL) + unsigned int iopl; /* Current IOPL for this VCPU, shifted left by + * 12 to match the eflags register. */ + + /* + * %dr7 bits the guest has set, but aren't loaded into hardware, and are + * completely emulated. + */ + uint32_t dr7_emul; + + /* Deferred VA-based update state. */ + bool_t need_update_runstate_area; + struct vcpu_time_info pending_system_time; +}; + +struct arch_vcpu +{ + /* + * guest context (mirroring struct vcpu_guest_context) common + * between pv and hvm guests + */ + + void *fpu_ctxt; + struct cpu_user_regs user_regs; + + /* Debug registers. */ + unsigned long dr[4]; + unsigned long dr7; /* Ideally int, but __vmread() needs long. */ + unsigned int dr6; + + /* other state */ + + unsigned long flags; /* TF_ */ + + struct vpmu_struct vpmu; + + struct { + bool pending; + uint8_t old_mask; + } async_exception_state[VCPU_TRAP_LAST]; +#define async_exception_state(t) async_exception_state[(t)-1] + uint8_t async_exception_mask; + + /* Virtual Machine Extensions */ + union { + struct pv_vcpu pv; + struct hvm_vcpu hvm; + }; + + /* + * guest_table{,_user} hold a ref to the page, and also a type-count + * unless shadow refcounts are in use + */ + pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */ + pagetable_t guest_table; /* (MFN) guest notion of cr3 */ + struct page_info *old_guest_table; /* partially destructed pagetable */ + struct page_info *old_guest_ptpg; /* containing page table of the */ + /* former, if any */ + bool old_guest_table_partial; /* Are we dropping a type ref, or just + * finishing up a partial de-validation? 
*/
+
+    unsigned long cr3; /* (MA) value to install in HW CR3 */
+
+    /*
+     * The save area for Processor Extended States and the bitmask of the
+     * XSAVE/XRSTOR features. They are used as follows: 1) when a vcpu
+     * (which has dirtied FPU/SSE state) is scheduled out, we XSAVE the
+     * state here; 2) in the #NM handler, we XRSTOR the state we XSAVE-d
+     * earlier.
+     */
+    struct xsave_struct *xsave_area;
+    uint64_t xcr0;
+    /* Accumulated eXtended features mask for using XSAVE/XRSTOR by Xen
+     * itself, as we can never know whether the guest OS depends on content
+     * preservation whenever the guest OS clears one feature flag (for
+     * example, temporarily).
+     * However, the processor should not be able to touch eXtended states
+     * before the guest explicitly enables them via xcr0.
+     */
+    uint64_t xcr0_accum;
+    /* This variable determines whether nonlazy extended state has been used,
+     * and thus should be saved/restored. */
+    bool_t nonlazy_xstate_used;
+
+    /* Restore all FPU state (lazy and non-lazy state) on context switch? */
+    bool fully_eager_fpu;
+
+    struct vmce vmce;
+
+    struct paging_vcpu paging;
+
+    uint32_t gdbsx_vcpu_event;
+
+    /* A secondary copy of the vcpu time info. */
+    XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
+
+    struct arch_vm_event *vm_event;
+
+    struct vcpu_msrs *msrs;
+
+    struct {
+        bool next_interrupt_enabled;
+    } monitor;
+};
+
+struct guest_memory_policy
+{
+    bool nested_guest_mode;
+};
+
+void update_guest_memory_policy(struct vcpu *v,
+                                struct guest_memory_policy *policy);
+
+void domain_cpu_policy_changed(struct domain *d);
+
+bool update_runstate_area(struct vcpu *);
+bool update_secondary_system_time(struct vcpu *,
+                                  struct vcpu_time_info *);
+
+void vcpu_show_execution_state(struct vcpu *);
+void vcpu_show_registers(const struct vcpu *);
+
+static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
+{
+    return vmalloc(sizeof(struct vcpu_guest_context));
+}
+
+static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
+{
+    vfree(vgc);
+}
+
+void arch_vcpu_regs_init(struct vcpu *v);
+
+struct vcpu_hvm_context;
+int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);
+
+#ifdef CONFIG_PV
+void pv_inject_event(const struct x86_event *event);
+#else
+static inline void pv_inject_event(const struct x86_event *event)
+{
+    ASSERT_UNREACHABLE();
+}
+#endif
+
+static inline void pv_inject_hw_exception(unsigned int vector, int errcode)
+{
+    const struct x86_event event = {
+        .vector = vector,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode,
+    };
+
+    pv_inject_event(&event);
+}
+
+static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
+{
+    const struct x86_event event = {
+        .vector = TRAP_page_fault,
+        .type = X86_EVENTTYPE_HW_EXCEPTION,
+        .error_code = errcode,
+        .cr2 = cr2,
+    };
+
+    pv_inject_event(&event);
+}
+
+static inline void pv_inject_sw_interrupt(unsigned int vector)
+{
+    const struct x86_event event = {
+        .vector = vector,
+        .type = X86_EVENTTYPE_SW_INTERRUPT,
+        .error_code = X86_EVENT_NO_EC,
+    };
+
+    pv_inject_event(&event);
+}
+
+#define PV32_VM_ASSIST_MASK ((1UL << VMASST_TYPE_4gb_segments) | \
+                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
+                             (1UL << VMASST_TYPE_writable_pagetables) | \
+                             (1UL << VMASST_TYPE_pae_extended_cr3) | \
+                             (1UL << VMASST_TYPE_architectural_iopl) | \
+                             (1UL << VMASST_TYPE_runstate_update_flag))
+/*
+ * Much of what PV32_VM_ASSIST_MASK contains isn't really applicable to
+ * 64-bit, but we can't make such requests fail all of a sudden.
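+ *
+ * As an illustrative sketch, a VMASST_TYPE_* request would be validated
+ * against arch_vm_assist_valid_mask() (defined just below), e.g.
+ *
+ *     if ( !(arch_vm_assist_valid_mask(d) & (1UL << type)) )
+ *         return -EINVAL;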
+ */ +#define PV64_VM_ASSIST_MASK (PV32_VM_ASSIST_MASK | \ + (1UL << VMASST_TYPE_m2p_strict)) +#define HVM_VM_ASSIST_MASK (1UL << VMASST_TYPE_runstate_update_flag) + +#define arch_vm_assist_valid_mask(d) \ + (is_hvm_domain(d) ? HVM_VM_ASSIST_MASK \ + : is_pv_32bit_domain(d) ? PV32_VM_ASSIST_MASK \ + : PV64_VM_ASSIST_MASK) + +#endif /* __ASM_DOMAIN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/e820.h b/xen/arch/x86/include/asm/e820.h new file mode 100644 index 0000000000..9d8f1ba960 --- /dev/null +++ b/xen/arch/x86/include/asm/e820.h @@ -0,0 +1,42 @@ +#ifndef __E820_HEADER +#define __E820_HEADER + +/* + * PC BIOS standard E820 types and structure. + */ +#define E820_RAM 1 +#define E820_RESERVED 2 +#define E820_ACPI 3 +#define E820_NVS 4 +#define E820_UNUSABLE 5 + +struct __packed e820entry { + uint64_t addr; + uint64_t size; + uint32_t type; +}; + +#define E820MAX 1024 + +struct e820map { + unsigned int nr_map; + struct e820entry map[E820MAX]; +}; + +extern int sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map); +extern int e820_all_mapped(u64 start, u64 end, unsigned type); +extern int reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e); +extern int e820_change_range_type( + struct e820map *e820, uint64_t s, uint64_t e, + uint32_t orig_type, uint32_t new_type); +extern int e820_add_range( + struct e820map *, uint64_t s, uint64_t e, uint32_t type); +extern unsigned long init_e820(const char *, struct e820map *); +extern struct e820map e820; +extern struct e820map e820_raw; + +/* These symbols live in the boot trampoline. */ +extern struct e820map bios_e820map[]; +extern unsigned int bios_e820nr; + +#endif /*__E820_HEADER*/ diff --git a/xen/arch/x86/include/asm/edd.h b/xen/arch/x86/include/asm/edd.h new file mode 100644 index 0000000000..afaa23732a --- /dev/null +++ b/xen/arch/x86/include/asm/edd.h @@ -0,0 +1,164 @@ +/****************************************************************************** + * edd.h + * + * Copyright (C) 2002, 2003, 2004 Dell Inc. + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __XEN_EDD_H__ +#define __XEN_EDD_H__ + +#ifndef __ASSEMBLY__ + +struct __packed edd_info { + /* Int13, Fn48: Check Extensions Present. */ + u8 device; /* %dl: device */ + u8 version; /* %ah: major version */ + u16 interface_support; /* %cx: interface support bitmap */ + /* Int13, Fn08: Legacy Get Device Parameters. */ + u16 legacy_max_cylinder; /* %cl[7:6]:%ch: maximum cylinder number */ + u8 legacy_max_head; /* %dh: maximum head number */ + u8 legacy_sectors_per_track; /* %cl[5:0]: maximum sector number */ + /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). 
*/ + struct __packed edd_device_params { + u16 length; + u16 info_flags; + u32 num_default_cylinders; + u32 num_default_heads; + u32 sectors_per_track; + u64 number_of_sectors; + u16 bytes_per_sector; + u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + u16 key; /* = 0xBEDD */ + u8 device_path_info_length; + u8 reserved2; + u16 reserved3; + u8 host_bus_type[4]; + u8 interface_type[8]; + union { + struct __packed { + u16 base_address; + u16 reserved1; + u32 reserved2; + } isa; + struct __packed { + u8 bus; + u8 slot; + u8 function; + u8 channel; + u32 reserved; + } pci; + /* pcix is same as pci */ + struct __packed { + u64 reserved; + } ibnd; + struct __packed { + u64 reserved; + } xprs; + struct __packed { + u64 reserved; + } htpt; + struct __packed { + u64 reserved; + } unknown; + } interface_path; + union { + struct __packed { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } ata; + struct __packed { + u8 device; + u8 lun; + u8 reserved1; + u8 reserved2; + u32 reserved3; + u64 reserved4; + } atapi; + struct __packed { + u16 id; + u64 lun; + u16 reserved1; + u32 reserved2; + } scsi; + struct __packed { + u64 serial_number; + u64 reserved; + } usb; + struct __packed { + u64 eui; + u64 reserved; + } i1394; + struct __packed { + u64 wwid; + u64 lun; + } fibre; + struct __packed { + u64 identity_tag; + u64 reserved; + } i2o; + struct __packed { + u32 array_number; + u32 reserved1; + u64 reserved2; + } raid; + struct __packed { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } sata; + struct __packed { + u64 reserved1; + u64 reserved2; + } unknown; + } device_path; + u8 reserved4; + u8 checksum; + } edd_device_params; +}; + +struct __packed mbr_signature { + u8 device; + u8 pad[3]; + u32 signature; +}; + +/* These all reside in the boot trampoline. Access via bootsym(). */ +extern struct mbr_signature boot_mbr_signature[]; +extern u8 boot_mbr_signature_nr; +extern struct edd_info boot_edd_info[]; +extern u8 boot_edd_info_nr; + +#endif /* __ASSEMBLY__ */ + +/* Maximum number of EDD information structures at boot_edd_info. */ +#define EDD_INFO_MAX 6 + +/* Maximum number of MBR signatures at boot_mbr_signature. */ +#define EDD_MBR_SIG_MAX 16 + +/* Size of components of EDD information structure. */ +#define EDDEXTSIZE 8 +#define EDDPARMSIZE 74 + +#endif /* __XEN_EDD_H__ */ diff --git a/xen/arch/x86/include/asm/efibind.h b/xen/arch/x86/include/asm/efibind.h new file mode 100644 index 0000000000..bce02f3707 --- /dev/null +++ b/xen/arch/x86/include/asm/efibind.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/xen/arch/x86/include/asm/elf.h b/xen/arch/x86/include/asm/elf.h new file mode 100644 index 0000000000..1d7ea96e22 --- /dev/null +++ b/xen/arch/x86/include/asm/elf.h @@ -0,0 +1,20 @@ +#ifndef __X86_ELF_H__ +#define __X86_ELF_H__ + +typedef struct { + unsigned long cr0, cr2, cr3, cr4; +} crash_xen_core_t; + +#include + +#endif /* __X86_ELF_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/event.h b/xen/arch/x86/include/asm/event.h new file mode 100644 index 0000000000..5e09ede6d7 --- /dev/null +++ b/xen/arch/x86/include/asm/event.h @@ -0,0 +1,56 @@ +/****************************************************************************** + * event.h + * + * A nice interface for passing asynchronous events to guest OSes. 
+ * (architecture-dependent part) + * + */ + +#ifndef __ASM_EVENT_H__ +#define __ASM_EVENT_H__ + +#include + +void vcpu_kick(struct vcpu *v); +void vcpu_mark_events_pending(struct vcpu *v); + +static inline int vcpu_event_delivery_is_enabled(struct vcpu *v) +{ + return !vcpu_info(v, evtchn_upcall_mask); +} + +int hvm_local_events_need_delivery(struct vcpu *v); +static always_inline bool local_events_need_delivery(void) +{ + struct vcpu *v = current; + + ASSERT(!is_idle_vcpu(v)); + + return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) : + (vcpu_info(v, evtchn_upcall_pending) && + !vcpu_info(v, evtchn_upcall_mask))); +} + +static inline void local_event_delivery_disable(void) +{ + vcpu_info(current, evtchn_upcall_mask) = 1; +} + +static inline void local_event_delivery_enable(void) +{ + vcpu_info(current, evtchn_upcall_mask) = 0; +} + +/* No arch specific virq definition now. Default to global. */ +static inline bool arch_virq_is_global(unsigned int virq) +{ + return true; +} + +#ifdef CONFIG_PV_SHIM +# include +# define arch_evtchn_is_special(chn) \ + (pv_shim && (chn)->port && (chn)->state == ECS_RESERVED) +#endif + +#endif diff --git a/xen/arch/x86/include/asm/fixmap.h b/xen/arch/x86/include/asm/fixmap.h new file mode 100644 index 0000000000..20746afd0a --- /dev/null +++ b/xen/arch/x86/include/asm/fixmap.h @@ -0,0 +1,117 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * Modifications for Xen are copyright (c) 2002-2004, K A Fraser + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#include + +#define FIXADDR_TOP (VMAP_VIRT_END - PAGE_SIZE) +#define FIXADDR_X_TOP (XEN_VIRT_END - PAGE_SIZE) + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory backwards. + */ +enum fixed_addresses { + /* Index 0 is reserved since fix_to_virt(0) == FIXADDR_TOP. */ + FIX_RESERVED, + /* + * Indexes using the page tables set up before entering __start_xen() + * must be among the first (L1_PAGETABLE_ENTRIES - 1) entries. + * These are generally those needed by the various console drivers. + */ + FIX_COM_BEGIN, + FIX_COM_END, + FIX_EHCI_DBGP, +#ifdef CONFIG_XEN_GUEST + FIX_PV_CONSOLE, + FIX_XEN_SHARED_INFO, +#endif /* CONFIG_XEN_GUEST */ + /* Everything else should go further down. 
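+       As an illustrative sketch of the fixmap pattern: a driver maps
+       its MMIO page once at boot, e.g.
+       set_fixmap_nocache(FIX_HPET_BASE, hpet_address), with
+       hpet_address a hypothetical physical address, and thereafter
+       uses fix_to_virt(FIX_HPET_BASE) as a constant virtual address.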
*/ + FIX_APIC_BASE, + FIX_IO_APIC_BASE_0, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, + FIX_ACPI_BEGIN, + FIX_ACPI_END = FIX_ACPI_BEGIN + NUM_FIXMAP_ACPI_PAGES - 1, + FIX_HPET_BASE, + FIX_TBOOT_SHARED_BASE, + FIX_MSIX_IO_RESERV_BASE, + FIX_MSIX_IO_RESERV_END = FIX_MSIX_IO_RESERV_BASE + FIX_MSIX_MAX_PAGES -1, + FIX_TBOOT_MAP_ADDRESS, + FIX_APEI_RANGE_BASE, + FIX_APEI_RANGE_END = FIX_APEI_RANGE_BASE + FIX_APEI_RANGE_MAX -1, + FIX_EFI_MPF, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +extern void __set_fixmap( + enum fixed_addresses idx, unsigned long mfn, unsigned long flags); + +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR) + +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_UCMINUS) + +#define clear_fixmap(idx) __set_fixmap(idx, 0, 0) + +#define __fix_to_virt(x) gcc11_wrap(FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +#define fix_to_virt(x) ((void *)__fix_to_virt(x)) + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +enum fixed_addresses_x { + /* Index 0 is reserved since fix_x_to_virt(0) == FIXADDR_X_TOP. */ + FIX_X_RESERVED, +#ifdef CONFIG_HYPERV_GUEST + FIX_X_HYPERV_HCALL, +#endif + __end_of_fixed_addresses_x +}; + +#define FIXADDR_X_SIZE (__end_of_fixed_addresses_x << PAGE_SHIFT) +#define FIXADDR_X_START (FIXADDR_X_TOP - FIXADDR_X_SIZE) + +extern void __set_fixmap_x( + enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags); + +#define set_fixmap_x(idx, phys) \ + __set_fixmap_x(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES) + +#define clear_fixmap_x(idx) __set_fixmap_x(idx, 0, 0) + +#define __fix_x_to_virt(x) (FIXADDR_X_TOP - ((x) << PAGE_SHIFT)) +#define fix_x_to_virt(x) ((void *)__fix_x_to_virt(x)) + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/xen/arch/x86/include/asm/flushtlb.h b/xen/arch/x86/include/asm/flushtlb.h new file mode 100644 index 0000000000..0be2273387 --- /dev/null +++ b/xen/arch/x86/include/asm/flushtlb.h @@ -0,0 +1,203 @@ +/****************************************************************************** + * flushtlb.h + * + * TLB flushes are timestamped using a global virtual 'clock' which ticks + * on any TLB flush on any processor. + * + * Copyright (c) 2003-2004, K A Fraser + */ + +#ifndef __FLUSHTLB_H__ +#define __FLUSHTLB_H__ + +#include +#include +#include +#include + +/* The current time as shown by the virtual TLB clock. */ +extern u32 tlbflush_clock; + +/* Time at which each CPU's TLB was last flushed. */ +DECLARE_PER_CPU(u32, tlbflush_time); + +/* TLB clock is in use. */ +extern bool tlb_clk_enabled; + +static inline uint32_t tlbflush_current_time(void) +{ + /* Returning 0 from tlbflush_current_time will always force a flush. */ + return tlb_clk_enabled ? tlbflush_clock : 0; +} + +static inline void page_set_tlbflush_timestamp(struct page_info *page) +{ + /* Avoid the write if the TLB clock is disabled. */ + if ( !tlb_clk_enabled ) + return; + + /* + * Prevent storing a stale time stamp, which could happen if an update + * to tlbflush_clock plus a subsequent flush IPI happen between the + * reading of tlbflush_clock and the writing of the struct page_info + * field. 
+ */ + ASSERT(local_irq_is_enabled()); + local_irq_disable(); + page->tlbflush_timestamp = tlbflush_current_time(); + local_irq_enable(); +} + +/* + * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing. + * @lastuse_stamp is a timestamp taken when the PFN we are testing was last + * used for a purpose that may have caused the CPU's TLB to become tainted. + */ +static inline bool NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp) +{ + u32 curr_time = tlbflush_current_time(); + /* + * Two cases: + * 1. During a wrap, the clock ticks over to 0 while CPUs catch up. For + * safety during this period, we force a flush if @curr_time == 0. + * 2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp. + * To detect false positives because @cpu_stamp has wrapped, we + * also check @curr_time. If less than @lastuse_stamp we definitely + * wrapped, so there's no need for a flush (one is forced every wrap). + */ + return ((curr_time == 0) || + ((cpu_stamp <= lastuse_stamp) && + (lastuse_stamp <= curr_time))); +} + +/* + * Filter the given set of CPUs, removing those that definitely flushed their + * TLB since @page_timestamp. + */ +static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) +{ + unsigned int cpu; + + /* Short-circuit: there's no need to iterate if the clock is disabled. */ + if ( !tlb_clk_enabled ) + return; + + for_each_cpu ( cpu, mask ) + if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) + __cpumask_clear_cpu(cpu, mask); +} + +void new_tlbflush_clock_period(void); + +/* Read pagetable base. */ +static inline unsigned long read_cr3(void) +{ + unsigned long cr3; + __asm__ __volatile__ ( + "mov %%cr3, %0" : "=r" (cr3) : ); + return cr3; +} + +/* Write pagetable base and implicitly tick the tlbflush clock. */ +void switch_cr3_cr4(unsigned long cr3, unsigned long cr4); + +/* flush_* flag fields: */ + /* + * Area to flush: 2^flush_order pages. Default is flush entire address space. + * NB. Multi-page areas do not need to have been mapped with a superpage. + */ +#define FLUSH_ORDER_MASK 0xff +#define FLUSH_ORDER(x) ((x)+1) + /* Flush TLBs (or parts thereof) */ +#define FLUSH_TLB 0x100 + /* Flush TLBs (or parts thereof) including global mappings */ +#define FLUSH_TLB_GLOBAL 0x200 + /* Flush data caches */ +#define FLUSH_CACHE 0x400 + /* VA for the flush has a valid mapping */ +#define FLUSH_VA_VALID 0x800 + /* Flush CPU state */ +#define FLUSH_VCPU_STATE 0x1000 + /* Flush the per-cpu root page table */ +#define FLUSH_ROOT_PGTBL 0x2000 +#if CONFIG_HVM + /* Flush all HVM guests linear TLB (using ASID/VPID) */ +#define FLUSH_HVM_ASID_CORE 0x4000 +#else +#define FLUSH_HVM_ASID_CORE 0 +#endif +#if defined(CONFIG_PV) || defined(CONFIG_SHADOW_PAGING) +/* + * Force an IPI to be sent. Note that adding this to the flags passed to + * flush_area_mask will prevent using the assisted flush without having any + * other side effect. + */ +# define FLUSH_FORCE_IPI 0x8000 +#else +# define FLUSH_FORCE_IPI 0 +#endif + +/* Flush local TLBs/caches. 
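+   For example (sketch): flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(0))
+   flushes the single page at va on this CPU, while
+   flush_local(FLUSH_TLB_GLOBAL) also drops global mappings.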
*/
+unsigned int flush_area_local(const void *va, unsigned int flags);
+#define flush_local(flags) flush_area_local(NULL, flags)
+
+/* Flush specified CPUs' TLBs/caches */
+void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
+#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)
+
+/* Flush all CPUs' TLBs/caches */
+#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
+#define flush_all(flags) flush_mask(&cpu_online_map, flags)
+
+/* Flush local TLBs */
+#define flush_tlb_local() \
+    flush_local(FLUSH_TLB)
+#define flush_tlb_one_local(v) \
+    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
+
+/* Flush specified CPUs' TLBs */
+#define flush_tlb_mask(mask) \
+    flush_mask(mask, FLUSH_TLB)
+#define flush_tlb_one_mask(mask,v) \
+    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
+
+/*
+ * Make the common code TLB flush helper force use of an IPI in order to be
+ * on the safe side. Note that not all calls from common code strictly require
+ * this.
+ */
+#define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI)
+
+/* Flush all CPUs' TLBs */
+#define flush_tlb_all() \
+    flush_tlb_mask(&cpu_online_map)
+#define flush_tlb_one_all(v) \
+    flush_tlb_one_mask(&cpu_online_map, v)
+
+#define flush_root_pgtbl_domain(d) \
+{ \
+    if ( is_pv_domain(d) && (d)->arch.pv.xpti ) \
+        flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL); \
+}
+
+static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
+static inline int invalidate_dcache_va_range(const void *p,
+                                             unsigned long size)
+{ return -EOPNOTSUPP; }
+static inline int clean_and_invalidate_dcache_va_range(const void *p,
+                                                       unsigned long size)
+{
+    unsigned int order = get_order_from_bytes(size);
+    /* sub-page granularity support needs to be added if necessary */
+    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+    return 0;
+}
+static inline int clean_dcache_va_range(const void *p, unsigned long size)
+{
+    return clean_and_invalidate_dcache_va_range(p, size);
+}
+
+unsigned int guest_flush_tlb_flags(const struct domain *d);
+void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
+
+#endif /* __FLUSHTLB_H__ */
diff --git a/xen/arch/x86/include/asm/genapic.h b/xen/arch/x86/include/asm/genapic.h
new file mode 100644
index 0000000000..51a65d3e0f
--- /dev/null
+++ b/xen/arch/x86/include/asm/genapic.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_GENAPIC_H
+#define _ASM_GENAPIC_H 1
+
+/*
+ * Generic APIC driver interface.
+ *
+ * A straightforward mapping of the APIC-related parts of the
+ * x86 subarchitecture interface to a dynamic object.
+ *
+ * This is used by the "generic" x86 subarchitecture.
+ *
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ */
+
+struct mpc_config_translation;
+struct mpc_config_bus;
+struct mp_config_table;
+struct mpc_config_processor;
+
+struct genapic {
+    const char *name;
+    int (*probe)(void);
+
+    /* Interrupt delivery parameters ('physical' vs. 'logical flat').
*/ + int int_delivery_mode; + int int_dest_mode; + void (*init_apic_ldr)(void); + const cpumask_t *(*vector_allocation_cpumask)(int cpu); + unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + void (*send_IPI_mask)(const cpumask_t *mask, int vector); + void (*send_IPI_self)(uint8_t vector); +}; + +#define APIC_INIT(aname, aprobe) \ + .name = aname, \ + .probe = aprobe + +extern struct genapic genapic; +extern const struct genapic apic_default; +extern const struct genapic apic_bigsmp; + +void send_IPI_self_legacy(uint8_t vector); + +void init_apic_ldr_flat(void); +unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask); +void send_IPI_mask_flat(const cpumask_t *mask, int vector); +const cpumask_t *vector_allocation_cpumask_flat(int cpu); +#define GENAPIC_FLAT \ + .int_delivery_mode = dest_LowestPrio, \ + .int_dest_mode = 1 /* logical delivery */, \ + .init_apic_ldr = init_apic_ldr_flat, \ + .vector_allocation_cpumask = vector_allocation_cpumask_flat, \ + .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \ + .send_IPI_mask = send_IPI_mask_flat, \ + .send_IPI_self = send_IPI_self_legacy + +void init_apic_ldr_phys(void); +unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask); +void send_IPI_mask_phys(const cpumask_t *mask, int vector); +const cpumask_t *vector_allocation_cpumask_phys(int cpu); +#define GENAPIC_PHYS \ + .int_delivery_mode = dest_Fixed, \ + .int_dest_mode = 0 /* physical delivery */, \ + .init_apic_ldr = init_apic_ldr_phys, \ + .vector_allocation_cpumask = vector_allocation_cpumask_phys, \ + .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \ + .send_IPI_mask = send_IPI_mask_phys, \ + .send_IPI_self = send_IPI_self_legacy + +#endif diff --git a/xen/arch/x86/include/asm/grant_table.h b/xen/arch/x86/include/asm/grant_table.h new file mode 100644 index 0000000000..a8a21439a4 --- /dev/null +++ b/xen/arch/x86/include/asm/grant_table.h @@ -0,0 +1,80 @@ +/****************************************************************************** + * include/asm-x86/grant_table.h + * + * Copyright (c) 2004-2005 K A Fraser + */ + +#ifndef __ASM_GRANT_TABLE_H__ +#define __ASM_GRANT_TABLE_H__ + +#include + +#include +#include + +#define INITIAL_NR_GRANT_FRAMES 1U + +struct grant_table_arch { +}; + +static inline int create_grant_host_mapping(uint64_t addr, mfn_t frame, + unsigned int flags, + unsigned int cache_flags) +{ + if ( paging_mode_external(current->domain) ) + return create_grant_p2m_mapping(addr, frame, flags, cache_flags); + return create_grant_pv_mapping(addr, frame, flags, cache_flags); +} + +static inline int replace_grant_host_mapping(uint64_t addr, mfn_t frame, + uint64_t new_addr, + unsigned int flags) +{ + if ( paging_mode_external(current->domain) ) + return replace_grant_p2m_mapping(addr, frame, new_addr, flags); + return replace_grant_pv_mapping(addr, frame, new_addr, flags); +} + +#define gnttab_init_arch(gt) 0 +#define gnttab_destroy_arch(gt) do {} while ( 0 ) +#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \ + (gfn_eq(gfn, INVALID_GFN) \ + ? guest_physmap_remove_page((gt)->domain, \ + gnttab_get_frame_gfn(gt, st, idx), \ + mfn, 0) \ + : 0 /* Handled in add_to_physmap_one(). */) +#define gnttab_get_frame_gfn(gt, st, idx) ({ \ + mfn_t mfn_ = (st) ? gnttab_status_mfn(gt, idx) \ + : gnttab_shared_mfn(gt, idx); \ + unsigned long gpfn_ = get_gpfn_from_mfn(mfn_x(mfn_)); \ + VALID_M2P(gpfn_) ? 
_gfn(gpfn_) : INVALID_GFN; \ +}) + +#define gnttab_shared_mfn(t, i) _mfn(__virt_to_mfn((t)->shared_raw[i])) + +#define gnttab_shared_gfn(d, t, i) mfn_to_gfn(d, gnttab_shared_mfn(t, i)) + +#define gnttab_status_mfn(t, i) _mfn(__virt_to_mfn((t)->status[i])) + +#define gnttab_status_gfn(d, t, i) mfn_to_gfn(d, gnttab_status_mfn(t, i)) + +#define gnttab_mark_dirty(d, f) paging_mark_dirty(d, f) + +static inline void gnttab_clear_flags(struct domain *d, + unsigned int mask, uint16_t *addr) +{ + /* Access must be confined to the specified 2 bytes. */ + asm volatile ("lock andw %1,%0" : "+m" (*addr) : "ir" ((uint16_t)~mask)); +} + +/* Foreign mappings of HVM-guest pages do not modify the type count. */ +#define gnttab_host_mapping_get_page_type(ro, ld, rd) \ + (!(ro) && (((ld) == (rd)) || !paging_mode_external(rd))) + +/* Done implicitly when page tables are destroyed. */ +#define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) ) + +#define gnttab_need_iommu_mapping(d) \ + (!paging_mode_translate(d) && need_iommu_pt_sync(d)) + +#endif /* __ASM_GRANT_TABLE_H__ */ diff --git a/xen/arch/x86/include/asm/guest.h b/xen/arch/x86/include/asm/guest.h new file mode 100644 index 0000000000..ccf1ffbb72 --- /dev/null +++ b/xen/arch/x86/include/asm/guest.h @@ -0,0 +1,39 @@ +/****************************************************************************** + * asm-x86/guest.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2017 Citrix Systems Ltd. + */ + +#ifndef __X86_GUEST_H__ +#define __X86_GUEST_H__ + +#include +#include +#include +#include +#include +#include + +#endif /* __X86_GUEST_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/hyperv-hcall.h b/xen/arch/x86/include/asm/guest/hyperv-hcall.h new file mode 100644 index 0000000000..423ca0860b --- /dev/null +++ b/xen/arch/x86/include/asm/guest/hyperv-hcall.h @@ -0,0 +1,97 @@ +/****************************************************************************** + * asm-x86/guest/hyperv-hcall.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2019 Microsoft. 
+ */ + +#ifndef __X86_HYPERV_HCALL_H__ +#define __X86_HYPERV_HCALL_H__ + +#include +#include +#include + +#include +#include +#include + +static inline uint64_t hv_do_hypercall(uint64_t control, paddr_t input_addr, + paddr_t output_addr) +{ + uint64_t status; + register unsigned long r8 asm ( "r8" ) = output_addr; + + /* See TLFS for volatile registers */ + asm volatile ( "call hv_hcall_page" + : "=a" (status), "+c" (control), + "+d" (input_addr) ASM_CALL_CONSTRAINT + : "r" (r8) + : "memory" ); + + return status; +} + +static inline uint64_t hv_do_fast_hypercall(uint16_t code, + uint64_t input1, uint64_t input2) +{ + uint64_t status; + uint64_t control = code | HV_HYPERCALL_FAST_BIT; + register unsigned long r8 asm ( "r8" ) = input2; + + /* See TLFS for volatile registers */ + asm volatile ( "call hv_hcall_page" + : "=a" (status), "+c" (control), + "+d" (input1) ASM_CALL_CONSTRAINT + : "r" (r8) ); + + return status; +} + +static inline uint64_t hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, + uint16_t varhead_size, + paddr_t input, paddr_t output) +{ + uint64_t control = code; + uint64_t status; + uint16_t rep_comp; + + control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; + control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; + + do { + status = hv_do_hypercall(control, input, output); + if ( (status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS ) + break; + + rep_comp = MASK_EXTR(status, HV_HYPERCALL_REP_COMP_MASK); + + control &= ~HV_HYPERCALL_REP_START_MASK; + control |= MASK_INSR(rep_comp, HV_HYPERCALL_REP_START_MASK); + } while ( rep_comp < rep_count ); + + return status; +} + +#endif /* __X86_HYPERV_HCALL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/hyperv-tlfs.h b/xen/arch/x86/include/asm/guest/hyperv-tlfs.h new file mode 100644 index 0000000000..03b71af82f --- /dev/null +++ b/xen/arch/x86/include/asm/guest/hyperv-tlfs.h @@ -0,0 +1,934 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file contains definitions from Hyper-V Hypervisor Top-Level Functional + * Specification (TLFS): + * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs + */ + +#ifndef _ASM_X86_HYPERV_TLFS_H +#define _ASM_X86_HYPERV_TLFS_H + +#include +#include +#include + +/* + * While not explicitly listed in the TLFS, Hyper-V always runs with a page size + * of 4096. These definitions are used when communicating with Hyper-V using + * guest physical pages and guest physical page addresses, since the guest page + * size may not be 4096 on all architectures. + */ +#define HV_HYP_PAGE_SHIFT 12 +#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT, UL) +#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) + +/* + * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent + * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). + */ +#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 +#define HYPERV_CPUID_INTERFACE 0x40000001 +#define HYPERV_CPUID_VERSION 0x40000002 +#define HYPERV_CPUID_FEATURES 0x40000003 +#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 +#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 +#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A + +#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 +#define HYPERV_CPUID_MIN 0x40000005 +#define HYPERV_CPUID_MAX 0x4000ffff + +/* + * Feature identification. 
EAX indicates which features are available + * to the partition based upon the current partition privileges. + * These are HYPERV_CPUID_FEATURES.EAX bits. + */ + +/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ +#define HV_X64_MSR_VP_RUNTIME_AVAILABLE BIT(0, UL) +/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1, UL) +/* + * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM + * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available + */ +#define HV_X64_MSR_SYNIC_AVAILABLE BIT(2, UL) +/* + * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through + * HV_X64_MSR_STIMER3_COUNT) available + */ +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3, UL) +/* + * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) + * are available + */ +#define HV_X64_MSR_APIC_ACCESS_AVAILABLE BIT(4, UL) +/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ +#define HV_X64_MSR_HYPERCALL_AVAILABLE BIT(5, UL) +/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ +#define HV_X64_MSR_VP_INDEX_AVAILABLE BIT(6, UL) +/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ +#define HV_X64_MSR_RESET_AVAILABLE BIT(7, UL) +/* + * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, + * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, + * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available + */ +#define HV_X64_MSR_STAT_PAGES_AVAILABLE BIT(8, UL) +/* Partition reference TSC MSR is available */ +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9, UL) +/* Partition Guest IDLE MSR is available */ +#define HV_X64_MSR_GUEST_IDLE_AVAILABLE BIT(10, UL) +/* + * There is a single feature flag that signifies if the partition has access + * to MSRs with local APIC and TSC frequencies. + */ +#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11, UL) +/* AccessReenlightenmentControls privilege */ +#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13, UL) + +/* + * Feature identification: indicates which flags were specified at partition + * creation. The format is the same as the partition creation flag structure + * defined in section Partition Creation Flags. + * These are HYPERV_CPUID_FEATURES.EBX bits. + */ +#define HV_X64_CREATE_PARTITIONS BIT(0, UL) +#define HV_X64_ACCESS_PARTITION_ID BIT(1, UL) +#define HV_X64_ACCESS_MEMORY_POOL BIT(2, UL) +#define HV_X64_ADJUST_MESSAGE_BUFFERS BIT(3, UL) +#define HV_X64_POST_MESSAGES BIT(4, UL) +#define HV_X64_SIGNAL_EVENTS BIT(5, UL) +#define HV_X64_CREATE_PORT BIT(6, UL) +#define HV_X64_CONNECT_PORT BIT(7, UL) +#define HV_X64_ACCESS_STATS BIT(8, UL) +#define HV_X64_DEBUGGING BIT(11, UL) +#define HV_X64_CPU_POWER_MANAGEMENT BIT(12, UL) + +/* + * Feature identification. EDX indicates which miscellaneous features + * are available to the partition. + * These are HYPERV_CPUID_FEATURES.EDX bits. 
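+ *
+ * As an illustrative sketch only (using Xen's cpuid() helper from
+ * asm/processor.h), a guest would read this leaf once and test the
+ * individual bits, e.g.:
+ *
+ *   unsigned int eax, ebx, ecx, edx;
+ *
+ *   cpuid(HYPERV_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
+ *   if ( edx & HV_X64_MWAIT_AVAILABLE )
+ *       ...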
+ */ +/* The MWAIT instruction is available (per section MONITOR / MWAIT) */ +#define HV_X64_MWAIT_AVAILABLE BIT(0, UL) +/* Guest debugging support is available */ +#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1, UL) +/* Performance Monitor support is available*/ +#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2, UL) +/* Support for physical CPU dynamic partitioning events is available*/ +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3, UL) +/* + * Support for passing hypercall input parameter block via XMM + * registers is available + */ +#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4, UL) +/* Support for a virtual guest idle state is available */ +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5, UL) +/* Frequency MSRs available */ +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8, UL) +/* Crash MSR available */ +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10, UL) +/* stimer Direct Mode is available */ +#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19, UL) + +/* + * Implementation recommendations. Indicates which behaviors the hypervisor + * recommends the OS implement for optimal performance. + * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits. + */ +/* + * Recommend using hypercall for address space switches rather + * than MOV to CR3 instruction + */ +#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0, UL) +/* Recommend using hypercall for local TLB flushes rather + * than INVLPG or MOV to CR3 instructions */ +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1, UL) +/* + * Recommend using hypercall for remote TLB flushes rather + * than inter-processor interrupts + */ +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2, UL) +/* + * Recommend using MSRs for accessing APIC registers + * EOI, ICR and TPR rather than their memory-mapped counterparts + */ +#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3, UL) +/* Recommend using the hypervisor-provided MSR to initiate a system RESET */ +#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4, UL) +/* + * Recommend using relaxed timing for this partition. If used, + * the VM should disable any watchdog timeouts that rely on the + * timely delivery of external interrupts + */ +#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5, UL) + +/* + * Recommend not using Auto End-Of-Interrupt feature + */ +#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9, UL) + +/* + * Recommend using cluster IPI hypercalls. + */ +#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10, UL) + +/* Recommend using the newer ExProcessorMasks interface */ +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11, UL) + +/* Recommend using enlightened VMCS */ +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14, UL) + +/* + * Virtual processor will never share a physical core with another virtual + * processor, except for virtual processors that are reported as sibling SMT + * threads. + */ +#define HV_X64_NO_NONARCH_CORESHARING BIT(18, UL) + +/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ +#define HV_X64_NESTED_DIRECT_FLUSH BIT(17, UL) +#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18, UL) +#define HV_X64_NESTED_MSR_BITMAP BIT(19, UL) + +/* Hyper-V specific model specific registers (MSRs) */ + +/* MSR used to identify the guest OS. */ +#define HV_X64_MSR_GUEST_OS_ID 0x40000000 + +/* MSR used to setup pages used to communicate with the hypervisor. */ +#define HV_X64_MSR_HYPERCALL 0x40000001 + +/* MSR used to provide vcpu index */ +#define HV_X64_MSR_VP_INDEX 0x40000002 + +/* MSR used to reset the guest OS. 
*/ +#define HV_X64_MSR_RESET 0x40000003 + +/* MSR used to provide vcpu runtime in 100ns units */ +#define HV_X64_MSR_VP_RUNTIME 0x40000010 + +/* MSR used to read the per-partition time reference counter */ +#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 + +/* A partition's reference time stamp counter (TSC) page */ +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 + +/* MSR used to retrieve the TSC frequency */ +#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 + +/* MSR used to retrieve the local APIC timer frequency */ +#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 + +/* Define the virtual APIC registers */ +#define HV_X64_MSR_EOI 0x40000070 +#define HV_X64_MSR_ICR 0x40000071 +#define HV_X64_MSR_TPR 0x40000072 +#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 + +/* Define synthetic interrupt controller model specific registers. */ +#define HV_X64_MSR_SCONTROL 0x40000080 +#define HV_X64_MSR_SVERSION 0x40000081 +#define HV_X64_MSR_SIEFP 0x40000082 +#define HV_X64_MSR_SIMP 0x40000083 +#define HV_X64_MSR_EOM 0x40000084 +#define HV_X64_MSR_SINT0 0x40000090 +#define HV_X64_MSR_SINT1 0x40000091 +#define HV_X64_MSR_SINT2 0x40000092 +#define HV_X64_MSR_SINT3 0x40000093 +#define HV_X64_MSR_SINT4 0x40000094 +#define HV_X64_MSR_SINT5 0x40000095 +#define HV_X64_MSR_SINT6 0x40000096 +#define HV_X64_MSR_SINT7 0x40000097 +#define HV_X64_MSR_SINT8 0x40000098 +#define HV_X64_MSR_SINT9 0x40000099 +#define HV_X64_MSR_SINT10 0x4000009A +#define HV_X64_MSR_SINT11 0x4000009B +#define HV_X64_MSR_SINT12 0x4000009C +#define HV_X64_MSR_SINT13 0x4000009D +#define HV_X64_MSR_SINT14 0x4000009E +#define HV_X64_MSR_SINT15 0x4000009F + +/* + * Synthetic Timer MSRs. Four timers per vcpu. + */ +#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 +#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 +#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 +#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 +#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 +#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 +#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 +#define HV_X64_MSR_STIMER3_COUNT 0x400000B7 + +/* Hyper-V guest idle MSR */ +#define HV_X64_MSR_GUEST_IDLE 0x400000F0 + +/* Hyper-V guest crash notification MSR's */ +#define HV_X64_MSR_CRASH_P0 0x40000100 +#define HV_X64_MSR_CRASH_P1 0x40000101 +#define HV_X64_MSR_CRASH_P2 0x40000102 +#define HV_X64_MSR_CRASH_P3 0x40000103 +#define HV_X64_MSR_CRASH_P4 0x40000104 +#define HV_X64_MSR_CRASH_CTL 0x40000105 + +/* TSC emulation after migration */ +#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 +#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 +#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 + +/* + * Declare the MSR used to setup pages used to communicate with the hypervisor. + */ +union hv_x64_msr_hypercall_contents { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:11; + u64 guest_physical_address:52; + }; +}; + +/* + * TSC page layout. + */ +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; + u64 reserved2[509]; +}; + +/* + * The guest OS needs to register the guest ID with the hypervisor. + * The guest ID is a 64 bit entity and the structure of this ID is + * specified in the Hyper-V specification: + * + * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx + * + * While the current guideline does not specify how Linux guest ID(s) + * need to be generated, our plan is to publish the guidelines for + * Linux and other guest operating systems that currently are hosted + * on Hyper-V. 
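+ * (As an illustrative sketch using the union defined below, a Xen
+ * guest could register itself with something like:
+ *
+ *   union hv_guest_os_id id = { .vendor = HV_XEN_VENDOR_ID,
+ *                               .major = 4, .minor = 17 };
+ *   wrmsrl(HV_X64_MSR_GUEST_OS_ID, id.raw);
+ *
+ * with the exact version values being the guest's choice.)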
The implementation here conforms to these
+ * yet-unpublished guidelines.
+ *
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - Os Type; Linux 0x1, FreeBSD 0x2, Xen 0x3
+ * 55:48 - Distro specific identification
+ * 47:16 - Guest OS version number
+ * 15:0 - Distro specific identification
+ *
+ *
+ */
+
+#define HV_LINUX_VENDOR_ID              0x8100
+#define HV_XEN_VENDOR_ID                0x8300
+union hv_guest_os_id
+{
+    uint64_t raw;
+    struct
+    {
+        uint64_t build_number:16;
+        uint64_t service_pack:8;
+        uint64_t minor:8;
+        uint64_t major:8;
+        uint64_t os:8;
+        uint64_t vendor:16;
+    };
+};
+
+struct hv_reenlightenment_control {
+    __u64 vector:8;
+    __u64 reserved1:8;
+    __u64 enabled:1;
+    __u64 reserved2:15;
+    __u64 target_vp:32;
+};
+
+struct hv_tsc_emulation_control {
+    __u64 enabled:1;
+    __u64 reserved:63;
+};
+
+struct hv_tsc_emulation_status {
+    __u64 inprogress:1;
+    __u64 reserved:63;
+};
+
+#define HV_X64_MSR_HYPERCALL_ENABLE             0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
+    (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
+
+/*
+ * Crash notification (HV_X64_MSR_CRASH_CTL) flags.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG   BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY       BIT_ULL(63)
+#define HV_X64_MSR_CRASH_PARAMS \
+    (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
+#define HV_IPI_LOW_VECTOR   0x10
+#define HV_IPI_HIGH_VECTOR  0xff
+
+/* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE          0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST           0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT                0x0008
+#define HVCALL_SEND_IPI                             0x000b
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX       0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX        0x0014
+#define HVCALL_SEND_IPI_EX                          0x0015
+#define HVCALL_POST_MESSAGE                         0x005c
+#define HVCALL_SIGNAL_EVENT                         0x005d
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE   0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST    0x00b0
+#define HVCALL_EXT_CALL_QUERY_CAPABILITIES          0x8001
+
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE        0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+    (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Hyper-V Enlightened VMCS version mask in nested features CPUID */
+#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff
+
+#define HV_X64_MSR_TSC_REFERENCE_ENABLE         0x00000001
+#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT  12
+
+#define HV_PROCESSOR_POWER_STATE_C0 0
+#define HV_PROCESSOR_POWER_STATE_C1 1
+#define HV_PROCESSOR_POWER_STATE_C2 2
+#define HV_PROCESSOR_POWER_STATE_C3 3
+
+#define HV_FLUSH_ALL_PROCESSORS             BIT(0, UL)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1, UL)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY   BIT(2, UL)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT  BIT(3, UL)
+
+enum HV_GENERIC_SET_FORMAT {
+    HV_GENERIC_SET_SPARSE_4K,
+    HV_GENERIC_SET_ALL,
+};
+
+#define HV_HYPERCALL_RESULT_MASK    0xffff /* GENMASK_ULL(15, 0) */
+#define HV_HYPERCALL_FAST_BIT       BIT(16, UL)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_MASK (0xfffULL << HV_HYPERCALL_REP_COMP_OFFSET) /* GENMASK_ULL(43, 32) */
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK (0xfffULL << HV_HYPERCALL_REP_START_OFFSET) /* GENMASK_ULL(59, 48) */
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS                   0
+#define HV_STATUS_INVALID_HYPERCALL_CODE    2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT   3
+#define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_PORT_ID 17 +#define HV_STATUS_INVALID_CONNECTION_ID 18 +#define HV_STATUS_INSUFFICIENT_BUFFERS 19 + +/* + * The Hyper-V TimeRefCount register and the TSC + * page provide a guest VM clock with 100ns tick rate + */ +#define HV_CLOCK_HZ (NSEC_PER_SEC/100) + +typedef struct _HV_REFERENCE_TSC_PAGE { + __u32 tsc_sequence; + __u32 res1; + __u64 tsc_scale; + __s64 tsc_offset; +} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; + +/* Define the number of synthetic interrupt sources. */ +#define HV_SYNIC_SINT_COUNT (16) +/* Define the expected SynIC version. */ +#define HV_SYNIC_VERSION_1 (0x1) +/* Valid SynIC vectors are 16-255. */ +#define HV_SYNIC_FIRST_VALID_VECTOR (16) + +#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) +#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) +#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) +#define HV_SYNIC_SINT_MASKED (1ULL << 16) +#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) +#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) + +#define HV_SYNIC_STIMER_COUNT (4) + +/* Define synthetic interrupt controller message constants. */ +#define HV_MESSAGE_SIZE (256) +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) + +/* Define hypervisor message types. */ +enum hv_message_type { + HVMSG_NONE = 0x00000000, + + /* Memory access messages. */ + HVMSG_UNMAPPED_GPA = 0x80000000, + HVMSG_GPA_INTERCEPT = 0x80000001, + + /* Timer notification messages. */ + HVMSG_TIMER_EXPIRED = 0x80000010, + + /* Error messages. */ + HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, + HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, + HVMSG_UNSUPPORTED_FEATURE = 0x80000022, + + /* Trace buffer complete messages. */ + HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, + + /* Platform-specific processor intercept messages. */ + HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, + HVMSG_X64_MSR_INTERCEPT = 0x80010001, + HVMSG_X64_CPUID_INTERCEPT = 0x80010002, + HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, + HVMSG_X64_APIC_EOI = 0x80010004, + HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 +}; + +/* Define synthetic interrupt controller message flags. */ +union hv_message_flags { + __u8 asu8; + struct { + __u8 msg_pending:1; + __u8 reserved:7; + }; +}; + +/* Define port identifier type. */ +union hv_port_id { + __u32 asu32; + struct { + __u32 id:24; + __u32 reserved:8; + } u; +}; + +/* Define synthetic interrupt controller message header. */ +struct hv_message_header { + __u32 message_type; + __u8 payload_size; + union hv_message_flags message_flags; + __u8 reserved[2]; + union { + __u64 sender; + union hv_port_id port; + }; +}; + +/* Define synthetic interrupt controller message format. */ +struct hv_message { + struct hv_message_header header; + union { + __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; + } u; +}; + +/* Define the synthetic interrupt message page layout. */ +struct hv_message_page { + struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; +}; + +/* Define timer message payload structure. 
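+ * (This payload is what an HVMSG_TIMER_EXPIRED message, defined above,
+ * carries in hv_message.u.payload when a synthetic timer fires.)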
*/ +struct hv_timer_message_payload { + __u32 timer_index; + __u32 reserved; + __u64 expiration_time; /* When the timer expired */ + __u64 delivery_time; /* When the message was delivered */ +}; + +struct hv_nested_enlightenments_control { + struct { + __u32 directhypercall:1; + __u32 reserved:31; + } features; + struct { + __u32 reserved; + } hypercallControls; +}; + +union hv_vp_assist_page_msr +{ + uint64_t raw; + struct + { + uint64_t enabled:1; + uint64_t reserved_preserved:11; + uint64_t pfn:48; + }; +}; + +/* Define virtual processor assist page structure. */ +struct hv_vp_assist_page { + __u32 apic_assist; + __u32 reserved1; + __u64 vtl_control[3]; + struct hv_nested_enlightenments_control nested_control; + __u8 enlighten_vmentry; + __u8 reserved2[7]; + __u64 current_nested_vmcs; +}; + +struct hv_enlightened_vmcs { + u32 revision_id; + u32 abort; + + u16 host_es_selector; + u16 host_cs_selector; + u16 host_ss_selector; + u16 host_ds_selector; + u16 host_fs_selector; + u16 host_gs_selector; + u16 host_tr_selector; + + u16 padding16_1; + + u64 host_ia32_pat; + u64 host_ia32_efer; + + u64 host_cr0; + u64 host_cr3; + u64 host_cr4; + + u64 host_ia32_sysenter_esp; + u64 host_ia32_sysenter_eip; + u64 host_rip; + u32 host_ia32_sysenter_cs; + + u32 pin_based_vm_exec_control; + u32 vm_exit_controls; + u32 secondary_vm_exec_control; + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + + u16 guest_es_selector; + u16 guest_cs_selector; + u16 guest_ss_selector; + u16 guest_ds_selector; + u16 guest_fs_selector; + u16 guest_gs_selector; + u16 guest_ldtr_selector; + u16 guest_tr_selector; + + u32 guest_es_limit; + u32 guest_cs_limit; + u32 guest_ss_limit; + u32 guest_ds_limit; + u32 guest_fs_limit; + u32 guest_gs_limit; + u32 guest_ldtr_limit; + u32 guest_tr_limit; + u32 guest_gdtr_limit; + u32 guest_idtr_limit; + + u32 guest_es_ar_bytes; + u32 guest_cs_ar_bytes; + u32 guest_ss_ar_bytes; + u32 guest_ds_ar_bytes; + u32 guest_fs_ar_bytes; + u32 guest_gs_ar_bytes; + u32 guest_ldtr_ar_bytes; + u32 guest_tr_ar_bytes; + + u64 guest_es_base; + u64 guest_cs_base; + u64 guest_ss_base; + u64 guest_ds_base; + u64 guest_fs_base; + u64 guest_gs_base; + u64 guest_ldtr_base; + u64 guest_tr_base; + u64 guest_gdtr_base; + u64 guest_idtr_base; + + u64 padding64_1[3]; + + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + + u64 cr3_target_value0; + u64 cr3_target_value1; + u64 cr3_target_value2; + u64 cr3_target_value3; + + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + + u32 cr3_target_count; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_msr_load_count; + + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 vmcs_link_pointer; + + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + + u64 guest_pending_dbg_exceptions; + u64 guest_sysenter_esp; + u64 guest_sysenter_eip; + + u32 guest_activity_state; + u32 guest_sysenter_cs; + + u64 cr0_guest_host_mask; + u64 cr4_guest_host_mask; + u64 cr0_read_shadow; + u64 cr4_read_shadow; + u64 guest_cr0; + u64 guest_cr3; + u64 guest_cr4; + u64 guest_dr7; + + u64 host_fs_base; + u64 host_gs_base; + u64 host_tr_base; + u64 host_gdtr_base; + u64 host_idtr_base; + u64 host_rsp; + + u64 ept_pointer; + + u16 virtual_processor_id; + u16 padding16_2[3]; + + u64 padding64_2[5]; + u64 guest_physical_address; + + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 vm_exit_intr_info; + u32 
vm_exit_intr_error_code; + u32 idt_vectoring_info_field; + u32 idt_vectoring_error_code; + u32 vm_exit_instruction_len; + u32 vmx_instruction_info; + + u64 exit_qualification; + u64 exit_io_instruction_ecx; + u64 exit_io_instruction_esi; + u64 exit_io_instruction_edi; + u64 exit_io_instruction_eip; + + u64 guest_linear_address; + u64 guest_rsp; + u64 guest_rflags; + + u32 guest_interruptibility_info; + u32 cpu_based_vm_exec_control; + u32 exception_bitmap; + u32 vm_entry_controls; + u32 vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + + u64 guest_rip; + + u32 hv_clean_fields; + u32 hv_padding_32; + u32 hv_synthetic_controls; + struct { + u32 nested_flush_hypercall:1; + u32 msr_bitmap:1; + u32 reserved:30; + } hv_enlightenments_control; + u32 hv_vp_id; + + u64 hv_vm_id; + u64 partition_assist_page; + u64 padding64_4[4]; + u64 guest_bndcfgs; + u64 padding64_5[7]; + u64 xss_exit_bitmap; + u64 padding64_6[7]; +}; + +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0 +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14, UL) +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15, UL) + +#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF + +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) + +/* + * Synthetic timer configuration. + */ +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable:1; + u64 periodic:1; + u64 lazy:1; + u64 auto_enable:1; + u64 apic_vector:8; + u64 direct_mode:1; + u64 reserved_z0:3; + u64 sintx:4; + u64 reserved_z1:44; + }; +}; + + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:63; + }; +}; + +/* Define synthetic interrupt source. 
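+ * (One such register exists per HV_X64_MSR_SINT0..HV_X64_MSR_SINT15,
+ * i.e. HV_SYNIC_SINT_COUNT of them per virtual processor.)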
*/ +union hv_synic_sint { + u64 as_uint64; + struct { + u64 vector:8; + u64 reserved1:8; + u64 masked:1; + u64 auto_eoi:1; + u64 polling:1; + u64 reserved2:45; + }; +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled:1; + u64 preserved:11; + u64 base_simp_gpa:52; + }; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled:1; + u64 preserved:11; + u64 base_siefp_gpa:52; + }; +}; + +struct hv_vpset { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; +}; + +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +}; + +/* HvCallSendSyntheticClusterIpiEx hypercall */ +struct hv_send_ipi_ex { + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +}; + +/* HvFlushGuestPhysicalAddressSpace hypercalls */ +struct hv_guest_mapping_flush { + u64 address_space; + u64 flags; +}; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. + */ +#define HV_MAX_FLUSH_PAGES (2048) + +/* HvFlushGuestPhysicalAddressList hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages:11; + u64 largepage:1; + u64 basepfn:52; + } page; +}; + +/* + * All input flush parameters should be in single page. The max flush + * count is equal with how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. + */ +#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; +}; + +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ +struct hv_tlb_flush { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +}; + +/* HvFlushVirtualAddressSpaceEx hypercall */ +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + /* u64 gva_list[]; */ +}; + +struct hv_partition_assist_pg { + u32 tlb_lock_count; +}; +#endif diff --git a/xen/arch/x86/include/asm/guest/hyperv.h b/xen/arch/x86/include/asm/guest/hyperv.h new file mode 100644 index 0000000000..1a1b47831c --- /dev/null +++ b/xen/arch/x86/include/asm/guest/hyperv.h @@ -0,0 +1,86 @@ +/****************************************************************************** + * asm-x86/guest/hyperv.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2019 Microsoft. 
+ */ + +#ifndef __X86_GUEST_HYPERV_H__ +#define __X86_GUEST_HYPERV_H__ + +#include + +/* Use top-most MFN for hypercall page */ +#define HV_HCALL_MFN (((1ull << paddr_bits) - 1) >> HV_HYP_PAGE_SHIFT) + +/* + * The specification says: "The partition reference time is computed + * by the following formula: + * + * ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset + * + * The multiplication is a 64 bit multiplication, which results in a + * 128 bit number which is then shifted 64 times to the right to obtain + * the high 64 bits." + */ +static inline uint64_t hv_scale_tsc(uint64_t tsc, uint64_t scale, + int64_t offset) +{ + uint64_t result; + + /* + * Quadword MUL takes an implicit operand in RAX, and puts the result + * in RDX:RAX. Because we only want the result of the multiplication + * after shifting right by 64 bits, we therefore only need the content + * of RDX. + */ + asm ( "mulq %[scale]" + : "+a" (tsc), "=d" (result) + : [scale] "rm" (scale) ); + + return result + offset; +} + +#ifdef CONFIG_HYPERV_GUEST + +#include + +struct ms_hyperv_info { + uint32_t features; + uint32_t misc_features; + uint32_t hints; + uint32_t nested_features; + uint32_t max_vp_index; + uint32_t max_lp_index; +}; +extern struct ms_hyperv_info ms_hyperv; + +const struct hypervisor_ops *hyperv_probe(void); + +#else + +static inline const struct hypervisor_ops *hyperv_probe(void) { return NULL; } + +#endif /* CONFIG_HYPERV_GUEST */ +#endif /* __X86_GUEST_HYPERV_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/hypervisor.h b/xen/arch/x86/include/asm/guest/hypervisor.h new file mode 100644 index 0000000000..0a6c3b47ab --- /dev/null +++ b/xen/arch/x86/include/asm/guest/hypervisor.h @@ -0,0 +1,85 @@ +/****************************************************************************** + * asm-x86/guest/hypervisor.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2019 Microsoft. + */ + +#ifndef __X86_HYPERVISOR_H__ +#define __X86_HYPERVISOR_H__ + +#include + +#include + +struct hypervisor_ops { + /* Name of the hypervisor */ + const char *name; + /* Main setup routine */ + void (*setup)(void); + /* AP setup */ + int (*ap_setup)(void); + /* Resume from suspension */ + void (*resume)(void); + /* Fix up e820 map */ + void (*e820_fixup)(struct e820map *e820); + /* L0 assisted TLB flush */ + int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int flags); +}; + +#ifdef CONFIG_GUEST + +const char *hypervisor_probe(void); +void hypervisor_setup(void); +int hypervisor_ap_setup(void); +void hypervisor_resume(void); +void hypervisor_e820_fixup(struct e820map *e820); +/* + * L0 assisted TLB flush. + * mask: cpumask of the dirty vCPUs that should be flushed. + * va: linear address to flush, or NULL for entire address space. + * flags: flags for flushing, including the order of va. 
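+ *
+ * A minimal usage sketch (illustrative only; the FLUSH_* flag encoding
+ * is the one consumed by asm/flushtlb.h):
+ *
+ *   hypervisor_flush_tlb(d->dirty_cpumask, va, FLUSH_TLB | FLUSH_ORDER(0));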
+ */ +int hypervisor_flush_tlb(const cpumask_t *mask, const void *va, + unsigned int flags); + +#else + +#include +#include + +static inline const char *hypervisor_probe(void) { return NULL; } +static inline void hypervisor_setup(void) { ASSERT_UNREACHABLE(); } +static inline int hypervisor_ap_setup(void) { return 0; } +static inline void hypervisor_resume(void) { ASSERT_UNREACHABLE(); } +static inline void hypervisor_e820_fixup(struct e820map *e820) {} +static inline int hypervisor_flush_tlb(const cpumask_t *mask, const void *va, + unsigned int flags) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_GUEST */ + +#endif /* __X86_HYPERVISOR_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/pvh-boot.h b/xen/arch/x86/include/asm/guest/pvh-boot.h new file mode 100644 index 0000000000..48ffd1a0b1 --- /dev/null +++ b/xen/arch/x86/include/asm/guest/pvh-boot.h @@ -0,0 +1,58 @@ +/****************************************************************************** + * asm-x86/guest/pvh-boot.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2017 Citrix Systems Ltd. + */ + +#ifndef __X86_PVH_BOOT_H__ +#define __X86_PVH_BOOT_H__ + +#include + +#ifdef CONFIG_PVH_GUEST + +extern bool pvh_boot; + +void pvh_init(multiboot_info_t **mbi, module_t **mod); +void pvh_print_info(void); + +#else + +#include + +#define pvh_boot 0 + +static inline void pvh_init(multiboot_info_t **mbi, module_t **mod) +{ + ASSERT_UNREACHABLE(); +} + +static inline void pvh_print_info(void) +{ + ASSERT_UNREACHABLE(); +} + +#endif /* CONFIG_PVH_GUEST */ +#endif /* __X86_PVH_BOOT_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/xen-hcall.h b/xen/arch/x86/include/asm/guest/xen-hcall.h new file mode 100644 index 0000000000..03d5868a9e --- /dev/null +++ b/xen/arch/x86/include/asm/guest/xen-hcall.h @@ -0,0 +1,212 @@ +/****************************************************************************** + * asm-x86/guest/xen-hcall.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2017 Citrix Systems Ltd. 
+ */
+
+#ifndef __X86_XEN_HYPERCALL_H__
+#define __X86_XEN_HYPERCALL_H__
+
+#ifdef CONFIG_XEN_GUEST
+
+#include
+
+#include
+
+#include
+#include
+#include
+
+#include
+
+/*
+ * Hypercall primitives for 64bit
+ *
+ * Inputs: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
+ */
+
+#define _hypercall64_1(type, hcall, a1)                                 \
+    ({                                                                  \
+        long res, tmp__;                                                \
+        asm volatile (                                                  \
+            "call hypercall_page + %c[offset]"                          \
+            : "=a" (res), "=D" (tmp__) ASM_CALL_CONSTRAINT              \
+            : [offset] "i" (hcall * 32),                                \
+              "1" ((long)(a1))                                          \
+            : "memory" );                                               \
+        (type)res;                                                      \
+    })
+
+#define _hypercall64_2(type, hcall, a1, a2)                             \
+    ({                                                                  \
+        long res, tmp__;                                                \
+        asm volatile (                                                  \
+            "call hypercall_page + %c[offset]"                          \
+            : "=a" (res), "=D" (tmp__), "=S" (tmp__)                    \
+              ASM_CALL_CONSTRAINT                                       \
+            : [offset] "i" (hcall * 32),                                \
+              "1" ((long)(a1)), "2" ((long)(a2))                        \
+            : "memory" );                                               \
+        (type)res;                                                      \
+    })
+
+#define _hypercall64_3(type, hcall, a1, a2, a3)                         \
+    ({                                                                  \
+        long res, tmp__;                                                \
+        asm volatile (                                                  \
+            "call hypercall_page + %c[offset]"                          \
+            : "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__)      \
+              ASM_CALL_CONSTRAINT                                       \
+            : [offset] "i" (hcall * 32),                                \
+              "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3))      \
+            : "memory" );                                               \
+        (type)res;                                                      \
+    })
+
+#define _hypercall64_4(type, hcall, a1, a2, a3, a4)                     \
+    ({                                                                  \
+        long res, tmp__;                                                \
+        register long _a4 asm ("r10") = ((long)(a4));                   \
+        asm volatile (                                                  \
+            "call hypercall_page + %c[offset]"                          \
+            : "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__),     \
+              "=&r" (tmp__) ASM_CALL_CONSTRAINT                         \
+            : [offset] "i" (hcall * 32),                                \
+              "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)),     \
+              "4" (_a4)                                                 \
+            : "memory" );                                               \
+        (type)res;                                                      \
+    })
+
+/*
+ * Primitive Hypercall wrappers
+ */
+static inline long xen_hypercall_sched_op(unsigned int cmd, void *arg)
+{
+    return _hypercall64_2(long, __HYPERVISOR_sched_op, cmd, arg);
+}
+
+static inline long xen_hypercall_memory_op(unsigned int cmd, void *arg)
+{
+    return _hypercall64_2(long, __HYPERVISOR_memory_op, cmd, arg);
+}
+
+static inline int xen_hypercall_vcpu_op(unsigned int cmd, unsigned int vcpu,
+                                        void *arg)
+{
+    return _hypercall64_3(long, __HYPERVISOR_vcpu_op, cmd, vcpu, arg);
+}
+
+static inline long xen_hypercall_event_channel_op(unsigned int cmd, void *arg)
+{
+    return _hypercall64_2(long, __HYPERVISOR_event_channel_op, cmd, arg);
+}
+
+static inline long xen_hypercall_grant_table_op(unsigned int cmd, void *arg,
+                                                unsigned int count)
+{
+    return _hypercall64_3(long, __HYPERVISOR_grant_table_op, cmd, arg, count);
+}
+
+static inline long xen_hypercall_hvm_op(unsigned int op, void *arg)
+{
+    return _hypercall64_2(long, __HYPERVISOR_hvm_op, op, arg);
+}
+
+/*
+ * Higher level hypercall helpers
+ */
+static inline void xen_hypercall_console_write(
+    const char *buf, unsigned int count)
+{
+    (void)_hypercall64_3(long, __HYPERVISOR_console_io,
+                         CONSOLEIO_write, count, buf);
+}
+
+static inline long xen_hypercall_shutdown(unsigned int reason)
+{
+    struct sched_shutdown s = { .reason = reason };
+    return xen_hypercall_sched_op(SCHEDOP_shutdown, &s);
+}
+
+static inline long xen_hypercall_evtchn_send(evtchn_port_t port)
+{
+    struct evtchn_send send = { .port = port };
+
+    return xen_hypercall_event_channel_op(EVTCHNOP_send, &send);
+}
+
+static inline long xen_hypercall_evtchn_unmask(evtchn_port_t port)
+{
+    struct evtchn_unmask unmask = { .port = port };
+
+    return xen_hypercall_event_channel_op(EVTCHNOP_unmask, &unmask);
+}
+
+static inline long xen_hypercall_hvm_get_param(uint32_t index, uint64_t *value)
+{
+    struct xen_hvm_param xhv = {
+        .domid =
DOMID_SELF, + .index = index, + }; + long ret = xen_hypercall_hvm_op(HVMOP_get_param, &xhv); + + if ( ret == 0 ) + *value = xhv.value; + + return ret; +} + +static inline long xen_hypercall_set_evtchn_upcall_vector( + unsigned int cpu, unsigned int vector) +{ + struct xen_hvm_evtchn_upcall_vector a = { + .vcpu = cpu, + .vector = vector, + }; + + return xen_hypercall_hvm_op(HVMOP_set_evtchn_upcall_vector, &a); +} + +#else /* CONFIG_XEN_GUEST */ + +#include + +#include + +static inline void xen_hypercall_console_write( + const char *buf, unsigned int count) +{ + ASSERT_UNREACHABLE(); +} + +static inline long xen_hypercall_shutdown(unsigned int reason) +{ + ASSERT_UNREACHABLE(); + return 0; +} + +#endif /* CONFIG_XEN_GUEST */ +#endif /* __X86_XEN_HYPERCALL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest/xen.h b/xen/arch/x86/include/asm/guest/xen.h new file mode 100644 index 0000000000..2042a9a0c2 --- /dev/null +++ b/xen/arch/x86/include/asm/guest/xen.h @@ -0,0 +1,61 @@ +/****************************************************************************** + * asm-x86/guest/xen.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2017 Citrix Systems Ltd. + */ + +#ifndef __X86_GUEST_XEN_H__ +#define __X86_GUEST_XEN_H__ + +#include + +#include +#include +#include + +#define XEN_shared_info ((struct shared_info *)fix_to_virt(FIX_XEN_SHARED_INFO)) + +#ifdef CONFIG_XEN_GUEST + +extern bool xen_guest; +extern bool pv_console; +extern uint32_t xen_cpuid_base; + +const struct hypervisor_ops *xg_probe(void); +int xg_alloc_unused_page(mfn_t *mfn); +int xg_free_unused_page(mfn_t mfn); + +DECLARE_PER_CPU(unsigned int, vcpu_id); +DECLARE_PER_CPU(struct vcpu_info *, vcpu_info); + +#else + +#define xen_guest 0 +#define pv_console 0 + +static inline const struct hypervisor_ops *xg_probe(void) { return NULL; } + +#endif /* CONFIG_XEN_GUEST */ +#endif /* __X86_GUEST_XEN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest_access.h b/xen/arch/x86/include/asm/guest_access.h new file mode 100644 index 0000000000..dbf789fa58 --- /dev/null +++ b/xen/arch/x86/include/asm/guest_access.h @@ -0,0 +1,59 @@ +/****************************************************************************** + * guest_access.h + * + * Copyright (c) 2006, K A Fraser + */ + +#ifndef __ASM_X86_GUEST_ACCESS_H__ +#define __ASM_X86_GUEST_ACCESS_H__ + +#include +#include +#include +#include + +/* Raw access functions: no type checking. */ +#define raw_copy_to_guest(dst, src, len) \ + (is_hvm_vcpu(current) ? \ + copy_to_user_hvm((dst), (src), (len)) : \ + copy_to_guest_pv(dst, src, len)) +#define raw_copy_from_guest(dst, src, len) \ + (is_hvm_vcpu(current) ? 
\ + copy_from_user_hvm((dst), (src), (len)) : \ + copy_from_guest_pv(dst, src, len)) +#define raw_clear_guest(dst, len) \ + (is_hvm_vcpu(current) ? \ + clear_user_hvm((dst), (len)) : \ + clear_guest_pv(dst, len)) +#define __raw_copy_to_guest(dst, src, len) \ + (is_hvm_vcpu(current) ? \ + copy_to_user_hvm((dst), (src), (len)) : \ + __copy_to_guest_pv(dst, src, len)) +#define __raw_copy_from_guest(dst, src, len) \ + (is_hvm_vcpu(current) ? \ + copy_from_user_hvm((dst), (src), (len)) : \ + __copy_from_guest_pv(dst, src, len)) + +/* + * Pre-validate a guest handle. + * Allows use of faster __copy_* functions. + */ +#define guest_handle_okay(hnd, nr) \ + (paging_mode_external(current->domain) || \ + array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))) +#define guest_handle_subrange_okay(hnd, first, last) \ + (paging_mode_external(current->domain) || \ + array_access_ok((hnd).p + (first), \ + (last)-(first)+1, \ + sizeof(*(hnd).p))) + +#endif /* __ASM_X86_GUEST_ACCESS_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest_atomics.h b/xen/arch/x86/include/asm/guest_atomics.h new file mode 100644 index 0000000000..c2dec0d650 --- /dev/null +++ b/xen/arch/x86/include/asm/guest_atomics.h @@ -0,0 +1,33 @@ +#ifndef _X86_GUEST_ATOMICS_H +#define _X86_GUEST_ATOMICS_H + +#include + +/* + * It is safe to use the atomics helpers on x86 on memory shared with + * the guests. + */ +#define guest_set_bit(d, nr, p) ((void)(d), set_bit(nr, p)) +#define guest_clear_bit(d, nr, p) ((void)(d), clear_bit(nr, p)) +#define guest_change_bit(d, nr, p) ((void)(d), change_bit(nr, p)) +#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p)) + +#define guest_test_and_set_bit(d, nr, p) \ + ((void)(d), test_and_set_bit(nr, p)) +#define guest_test_and_clear_bit(d, nr, p) \ + ((void)(d), test_and_clear_bit(nr, p)) +#define guest_test_and_change_bit(d, nr, p) \ + ((void)(d), test_and_change_bit(nr, p)) + +#define guest_cmpxchg(d, ptr, o, n) ((void)(d), cmpxchg(ptr, o, n)) +#define guest_cmpxchg64 guest_cmpxchg + +#endif /* _X86_GUEST_ATOMICS_H */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/guest_pt.h b/xen/arch/x86/include/asm/guest_pt.h new file mode 100644 index 0000000000..6647ccfb85 --- /dev/null +++ b/xen/arch/x86/include/asm/guest_pt.h @@ -0,0 +1,468 @@ +/****************************************************************************** + * xen/asm-x86/guest_pt.h + * + * Types and accessors for guest pagetable entries, as distinct from + * Xen's pagetable types. + * + * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including + * this file. + * + * Parts of this code are Copyright (c) 2006 by XenSource Inc. + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ + +#ifndef _XEN_ASM_GUEST_PT_H +#define _XEN_ASM_GUEST_PT_H + +#if !defined(GUEST_PAGING_LEVELS) +#error GUEST_PAGING_LEVELS not defined +#endif + +static inline paddr_t +gfn_to_paddr(gfn_t gfn) +{ + return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT; +} + +/* Override get_gfn to work with gfn_t */ +#undef get_gfn +#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC) + +/* Mask covering the reserved bits from superpage alignment. */ +#define SUPERPAGE_RSVD(bit) \ + (((1ul << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1ul))) + +static inline uint32_t fold_pse36(uint64_t val) +{ + return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 32)) >> (32 - 13)); +} +static inline uint64_t unfold_pse36(uint32_t val) +{ + return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 13)) << (32 - 13)); +} + +/* Types of the guest's page tables and access functions for them */ + +#if GUEST_PAGING_LEVELS == 2 + +#define GUEST_L1_PAGETABLE_ENTRIES 1024 +#define GUEST_L2_PAGETABLE_ENTRIES 1024 + +#define GUEST_L1_PAGETABLE_SHIFT 12 +#define GUEST_L2_PAGETABLE_SHIFT 22 + +#define GUEST_L1_PAGETABLE_RSVD 0 +#define GUEST_L2_PAGETABLE_RSVD 0 + +typedef uint32_t guest_intpte_t; +typedef struct { guest_intpte_t l1; } guest_l1e_t; +typedef struct { guest_intpte_t l2; } guest_l2e_t; + +#define PRI_gpte "08x" + +static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e) +{ return _gfn(gl1e.l1 >> PAGE_SHIFT); } +static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e) +{ return _gfn(gl2e.l2 >> PAGE_SHIFT); } + +static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e) +{ return gl1e.l1 & 0xfff; } +static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e) +{ return gl2e.l2 & 0xfff; } + +static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e) +{ return 0; } +static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e) +{ return 0; } + +static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags) +{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; } +static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags) +{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; } + +#define guest_l1_table_offset(_va) \ + (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1)) +#define guest_l2_table_offset(_va) \ + (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1)) + +#else /* GUEST_PAGING_LEVELS != 2 */ + +#if GUEST_PAGING_LEVELS == 3 + +#define GUEST_L1_PAGETABLE_ENTRIES 512 +#define GUEST_L2_PAGETABLE_ENTRIES 512 +#define GUEST_L3_PAGETABLE_ENTRIES 4 + +#define GUEST_L1_PAGETABLE_SHIFT 12 +#define GUEST_L2_PAGETABLE_SHIFT 21 +#define GUEST_L3_PAGETABLE_SHIFT 30 + +#define GUEST_L1_PAGETABLE_RSVD 0x7ff0000000000000ul +#define GUEST_L2_PAGETABLE_RSVD 0x7ff0000000000000ul +#define GUEST_L3_PAGETABLE_RSVD \ + (0xfff0000000000000ul | _PAGE_GLOBAL | _PAGE_PSE | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW) + +#else /* GUEST_PAGING_LEVELS == 4 */ + +#define GUEST_L1_PAGETABLE_ENTRIES 512 +#define GUEST_L2_PAGETABLE_ENTRIES 512 +#define GUEST_L3_PAGETABLE_ENTRIES 512 +#define GUEST_L4_PAGETABLE_ENTRIES 512 + +#define GUEST_L1_PAGETABLE_SHIFT 12 +#define GUEST_L2_PAGETABLE_SHIFT 21 +#define GUEST_L3_PAGETABLE_SHIFT 30 +#define GUEST_L4_PAGETABLE_SHIFT 39 + +#define GUEST_L1_PAGETABLE_RSVD 0 +#define GUEST_L2_PAGETABLE_RSVD 0 +#define GUEST_L3_PAGETABLE_RSVD 0 +/* NB L4e._PAGE_GLOBAL is reserved for AMD, but ignored for Intel. 
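+ * guest_l4e_rsvd_bits() below therefore folds _PAGE_GLOBAL into the
+ * reserved set only when the guest's CPUID vendor is AMD.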
*/ +#define GUEST_L4_PAGETABLE_RSVD _PAGE_PSE + +#endif + +typedef l1_pgentry_t guest_l1e_t; +typedef l2_pgentry_t guest_l2e_t; +typedef l3_pgentry_t guest_l3e_t; +#if GUEST_PAGING_LEVELS >= 4 +typedef l4_pgentry_t guest_l4e_t; +#endif +typedef intpte_t guest_intpte_t; + +#define PRI_gpte "016"PRIx64 + +static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e) +{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); } +static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e) +{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); } +static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e) +{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); } +#if GUEST_PAGING_LEVELS >= 4 +static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e) +{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); } +#endif + +static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e) +{ return l1e_get_flags(gl1e); } +static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e) +{ return l2e_get_flags(gl2e); } +static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e) +{ return l3e_get_flags(gl3e); } +#if GUEST_PAGING_LEVELS >= 4 +static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e) +{ return l4e_get_flags(gl4e); } +#endif + +static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e) +{ return l1e_get_pkey(gl1e); } +static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e) +{ return l2e_get_pkey(gl2e); } +static inline u32 guest_l3e_get_pkey(guest_l3e_t gl3e) +{ return l3e_get_pkey(gl3e); } + +static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags) +{ return l1e_from_pfn(gfn_x(gfn), flags); } +static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags) +{ return l2e_from_pfn(gfn_x(gfn), flags); } +static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags) +{ return l3e_from_pfn(gfn_x(gfn), flags); } +#if GUEST_PAGING_LEVELS >= 4 +static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags) +{ return l4e_from_pfn(gfn_x(gfn), flags); } +#endif + +#define guest_l1_table_offset(a) l1_table_offset(a) +#define guest_l2_table_offset(a) l2_table_offset(a) +#define guest_l3_table_offset(a) l3_table_offset(a) +#define guest_l4_table_offset(a) l4_table_offset(a) + +#endif /* GUEST_PAGING_LEVELS != 2 */ + +/* Mask of the GFNs covered by an L2 or L3 superpage */ +#define GUEST_L2_GFN_MASK (GUEST_L1_PAGETABLE_ENTRIES - 1) +#define GUEST_L3_GFN_MASK \ + ((GUEST_L2_PAGETABLE_ENTRIES * GUEST_L1_PAGETABLE_ENTRIES) - 1) + + +/* Which pagetable features are supported on this vcpu? */ + +static always_inline bool guest_can_use_l2_superpages(const struct vcpu *v) +{ + /* + * PV guests use Xen's paging settings. Being 4-level, 2M + * superpages are unconditionally supported. + * + * The L2 _PAGE_PSE bit must be honoured in HVM guests, whenever + * CR4.PSE is set or the guest is in PAE or long mode. + * It's also used in the dummy PT for vcpus with CR0.PG cleared. + */ + return (is_pv_vcpu(v) || + GUEST_PAGING_LEVELS != 2 || + !hvm_paging_enabled(v) || + (v->arch.hvm.guest_cr[4] & X86_CR4_PSE)); +} + +static always_inline bool guest_can_use_l3_superpages(const struct domain *d) +{ + /* + * There are no control register settings for the hardware pagewalk on the + * subject of 1G superpages. + * + * Shadow pagetables don't support 1GB superpages at all, and will always + * treat L3 _PAGE_PSE as reserved. + * + * With HAP however, if the guest constructs a 1GB superpage on capable + * hardware, it will function irrespective of whether the feature is + * advertised. Xen's model of performing a pagewalk should match. 
+ */ + return GUEST_PAGING_LEVELS >= 4 && paging_mode_hap(d) && cpu_has_page1gb; +} + +static inline bool guest_can_use_pse36(const struct domain *d) +{ + /* + * Only called in the context of 2-level guests, after + * guest_can_use_l2_superpages() has indicated true. + * + * Shadow pagetables don't support PSE36 superpages at all, and will + * always treat them as reserved. + * + * With HAP however, once L2 superpages are active, here are no control + * register settings for the hardware pagewalk on the subject of PSE36. + * If the guest constructs a PSE36 superpage on capable hardware, it will + * function irrespective of whether the feature is advertised. Xen's + * model of performing a pagewalk should match. + */ + return paging_mode_hap(d) && cpu_has_pse36; +} + +static always_inline bool guest_nx_enabled(const struct vcpu *v) +{ + if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect witout CR4.PAE. */ + return false; + + /* PV guests can't control EFER.NX, and inherits Xen's choice. */ + return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v); +} + +static always_inline bool guest_wp_enabled(const struct vcpu *v) +{ + /* PV guests can't control CR0.WP, and it is unconditionally set by Xen. */ + return is_pv_vcpu(v) || hvm_wp_enabled(v); +} + +static always_inline bool guest_smep_enabled(const struct vcpu *v) +{ + return !is_pv_vcpu(v) && hvm_smep_enabled(v); +} + +static always_inline bool guest_smap_enabled(const struct vcpu *v) +{ + return !is_pv_vcpu(v) && hvm_smap_enabled(v); +} + +static always_inline bool guest_pku_enabled(const struct vcpu *v) +{ + return !is_pv_vcpu(v) && hvm_pku_enabled(v); +} + +/* Helpers for identifying whether guest entries have reserved bits set. */ + +/* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */ +static always_inline uint64_t guest_rsvd_bits(const struct vcpu *v) +{ + return ((PADDR_MASK & + ~((1ul << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) | + (guest_nx_enabled(v) ? 0 : put_pte_flags(_PAGE_NX_BIT))); +} + +static always_inline bool guest_l1e_rsvd_bits(const struct vcpu *v, + guest_l1e_t l1e) +{ + return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD); +} + +static always_inline bool guest_l2e_rsvd_bits(const struct vcpu *v, + guest_l2e_t l2e) +{ + uint64_t rsvd_bits = guest_rsvd_bits(v); + + return ((l2e.l2 & (rsvd_bits | GUEST_L2_PAGETABLE_RSVD | + (guest_can_use_l2_superpages(v) ? 0 : _PAGE_PSE))) || + ((l2e.l2 & _PAGE_PSE) && + (l2e.l2 & ((GUEST_PAGING_LEVELS == 2 && guest_can_use_pse36(v->domain)) + /* PSE36 tops out at 40 bits of address width. */ + ? (fold_pse36(rsvd_bits | (1ul << 40))) + : SUPERPAGE_RSVD(GUEST_L2_PAGETABLE_SHIFT))))); +} + +#if GUEST_PAGING_LEVELS >= 3 +static always_inline bool guest_l3e_rsvd_bits(const struct vcpu *v, + guest_l3e_t l3e) +{ + return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD | + (guest_can_use_l3_superpages(v->domain) ? 0 : _PAGE_PSE))) || + ((l3e.l3 & _PAGE_PSE) && + (l3e.l3 & SUPERPAGE_RSVD(GUEST_L3_PAGETABLE_SHIFT)))); +} + +#if GUEST_PAGING_LEVELS >= 4 +static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v, + guest_l4e_t l4e) +{ + return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD | + ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD) + ? _PAGE_GLOBAL : 0)); +} +#endif /* GUEST_PAGING_LEVELS >= 4 */ +#endif /* GUEST_PAGING_LEVELS >= 3 */ + +/* Type used for recording a walk through guest pagetables. It is + * filled in by the pagetable walk function, and also used as a cache + * for later walks. 
When we encounter a superpage l2e, we fabricate an + * l1e for propagation to the shadow (for splintering guest superpages + * into many shadow l1 entries). */ +typedef struct guest_pagetable_walk walk_t; +struct guest_pagetable_walk +{ + unsigned long va; /* Address we were looking for */ +#if GUEST_PAGING_LEVELS >= 3 +#if GUEST_PAGING_LEVELS >= 4 + guest_l4e_t l4e; /* Guest's level 4 entry */ +#endif + guest_l3e_t l3e; /* Guest's level 3 entry */ +#endif + guest_l2e_t l2e; /* Guest's level 2 entry */ + union + { + guest_l1e_t l1e; /* Guest's level 1 entry (or fabrication). */ + uint64_t el1e; /* L2 PSE36 superpages wider than 32 bits. */ + }; +#if GUEST_PAGING_LEVELS >= 4 + mfn_t l4mfn; /* MFN that the level 4 entry was in */ + mfn_t l3mfn; /* MFN that the level 3 entry was in */ +#endif + mfn_t l2mfn; /* MFN that the level 2 entry was in */ + mfn_t l1mfn; /* MFN that the level 1 entry was in */ + + uint32_t pfec; /* Accumulated PFEC_* error code from walk. */ +}; + +/* Given a walk_t, translate the gw->va into the guest's notion of the + * corresponding frame number. */ +static inline gfn_t guest_walk_to_gfn(const walk_t *gw) +{ + if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) ) + return INVALID_GFN; + return (GUEST_PAGING_LEVELS == 2 + ? _gfn(gw->el1e >> PAGE_SHIFT) + : guest_l1e_get_gfn(gw->l1e)); +} + +/* Given a walk_t, translate the gw->va into the guest's notion of the + * corresponding physical address. */ +static inline paddr_t guest_walk_to_gpa(const walk_t *gw) +{ + gfn_t gfn = guest_walk_to_gfn(gw); + + if ( gfn_eq(gfn, INVALID_GFN) ) + return INVALID_PADDR; + + return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK); +} + +/* Given a walk_t from a successful walk, return the page-order of the + * page or superpage that the virtual address is in. */ +static inline unsigned int guest_walk_to_page_order(const walk_t *gw) +{ + /* This is only valid for successful walks - otherwise the + * PSE bits might be invalid. */ + ASSERT(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT); +#if GUEST_PAGING_LEVELS >= 3 + if ( guest_l3e_get_flags(gw->l3e) & _PAGE_PSE ) + return GUEST_L3_PAGETABLE_SHIFT - PAGE_SHIFT; +#endif + if ( guest_l2e_get_flags(gw->l2e) & _PAGE_PSE ) + return GUEST_L2_PAGETABLE_SHIFT - PAGE_SHIFT; + return GUEST_L1_PAGETABLE_SHIFT - PAGE_SHIFT; +} + + +/* + * Walk the guest pagetables, after the manner of a hardware walker. + * + * Inputs: a vcpu, a virtual address, a walk_t to fill, a + * pointer to a pagefault code, the MFN of the guest's + * top-level pagetable, and a mapping of the + * guest's top-level pagetable. + * + * We walk the vcpu's guest pagetables, filling the walk_t with what we + * see and adding any Accessed and Dirty bits that are needed in the + * guest entries. Using the pagefault code, we check the permissions as + * we go. For the purposes of reading pagetables we treat all non-RAM + * memory as containing zeroes. + * + * Returns a boolean indicating success or failure. walk_t.pfec contains + * the accumulated error code on failure. + */ + +/* Macro-fu so you can call guest_walk_tables() and get the right one.
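(With GUEST_PAGING_LEVELS == 3, for instance, a guest_walk_tables() call expands to guest_walk_tables_3_levels(), per the GPT_RENAME() macros below.)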
*/ +#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels +#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l) +#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS) + +bool +guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m, + unsigned long va, walk_t *gw, uint32_t pfec, + gfn_t top_gfn, mfn_t top_mfn, void *top_map); + +/* Pretty-print the contents of a guest-walk */ +static inline void print_gw(const walk_t *gw) +{ + gprintk(XENLOG_INFO, "GUEST WALK TO %p\n", _p(gw->va)); +#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */ +#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */ + gprintk(XENLOG_INFO, " l4e=%" PRI_gpte " l4mfn=%" PRI_mfn "\n", + gw->l4e.l4, mfn_x(gw->l4mfn)); + gprintk(XENLOG_INFO, " l3e=%" PRI_gpte " l3mfn=%" PRI_mfn "\n", + gw->l3e.l3, mfn_x(gw->l3mfn)); +#else /* PAE only... */ + gprintk(XENLOG_INFO, " l3e=%" PRI_gpte "\n", gw->l3e.l3); +#endif /* PAE or 64... */ +#endif /* All levels... */ + gprintk(XENLOG_INFO, " l2e=%" PRI_gpte " l2mfn=%" PRI_mfn "\n", + gw->l2e.l2, mfn_x(gw->l2mfn)); +#if GUEST_PAGING_LEVELS == 2 + gprintk(XENLOG_INFO, " el1e=%08" PRIx64 " l1mfn=%" PRI_mfn "\n", + gw->el1e, mfn_x(gw->l1mfn)); +#else + gprintk(XENLOG_INFO, " l1e=%" PRI_gpte " l1mfn=%" PRI_mfn "\n", + gw->l1e.l1, mfn_x(gw->l1mfn)); +#endif + gprintk(XENLOG_INFO, " pfec=%02x[%c%c%c%c%c%c]\n", gw->pfec, + gw->pfec & PFEC_prot_key ? 'K' : '-', + gw->pfec & PFEC_insn_fetch ? 'I' : 'd', + gw->pfec & PFEC_reserved_bit ? 'R' : '-', + gw->pfec & PFEC_user_mode ? 'U' : 's', + gw->pfec & PFEC_write_access ? 'W' : 'r', + gw->pfec & PFEC_page_present ? 'P' : '-' + ); +} + +#endif /* _XEN_ASM_GUEST_PT_H */ diff --git a/xen/arch/x86/include/asm/hap.h b/xen/arch/x86/include/asm/hap.h new file mode 100644 index 0000000000..90dece29de --- /dev/null +++ b/xen/arch/x86/include/asm/hap.h @@ -0,0 +1,60 @@ +/****************************************************************************** + * include/asm-x86/hap.h + * + * hardware-assisted paging + * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) + * + * Parts of this code are Copyright (c) 2006 by XenSource Inc. + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ + +#ifndef _XEN_HAP_H +#define _XEN_HAP_H + +#define HAP_PRINTK(_f, _a...) 
\ + debugtrace_printk("hap: %s(): " _f, __func__, ##_a) + +/************************************************/ +/* hap domain level functions */ +/************************************************/ +void hap_domain_init(struct domain *d); +int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); +int hap_enable(struct domain *d, u32 mode); +void hap_final_teardown(struct domain *d); +void hap_vcpu_teardown(struct vcpu *v); +void hap_teardown(struct domain *d, bool *preempted); +void hap_vcpu_init(struct vcpu *v); +int hap_track_dirty_vram(struct domain *d, + unsigned long begin_pfn, + unsigned int nr_frames, + XEN_GUEST_HANDLE(void) dirty_bitmap); + +extern const struct paging_mode *hap_paging_get_mode(struct vcpu *); +int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted); +unsigned int hap_get_allocation(struct domain *d); + +#endif /* XEN_HAP_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hardirq.h b/xen/arch/x86/include/asm/hardirq.h new file mode 100644 index 0000000000..276e3419d7 --- /dev/null +++ b/xen/arch/x86/include/asm/hardirq.h @@ -0,0 +1,37 @@ +#ifndef __ASM_HARDIRQ_H +#define __ASM_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int __local_irq_count; + unsigned int nmi_count; + unsigned int mce_count; + bool_t __mwait_wakeup; +} __cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +#define in_irq() (local_irq_count(smp_processor_id()) != 0) + +#define irq_enter() (local_irq_count(smp_processor_id())++) +#define irq_exit() (local_irq_count(smp_processor_id())--) + +#define nmi_count(cpu) __IRQ_STAT(cpu, nmi_count) +#define in_nmi_handler() (nmi_count(smp_processor_id()) != 0) +#define nmi_enter() (nmi_count(smp_processor_id())++) +#define nmi_exit() (nmi_count(smp_processor_id())--) + +#define mce_count(cpu) __IRQ_STAT(cpu, mce_count) +#define in_mce_handler() (mce_count(smp_processor_id()) != 0) +#define mce_enter() (mce_count(smp_processor_id())++) +#define mce_exit() (mce_count(smp_processor_id())--) + +void ack_bad_irq(unsigned int irq); + +extern void apic_intr_init(void); +extern void smp_intr_init(void); + +#endif /* __ASM_HARDIRQ_H */ diff --git a/xen/arch/x86/include/asm/hpet.h b/xen/arch/x86/include/asm/hpet.h new file mode 100644 index 0000000000..8f9725a95e --- /dev/null +++ b/xen/arch/x86/include/asm/hpet.h @@ -0,0 +1,101 @@ +#ifndef __X86_HPET_H__ +#define __X86_HPET_H__ + +/* + * Documentation on HPET can be found at: + * http://www.intel.com/content/dam/www/public/us/en/documents/ + * technical-specifications/software-developers-hpet-spec-1-0a.pdf + */ + +#define HPET_MMAP_SIZE 1024 + +#define HPET_ID 0x000 +#define HPET_PERIOD 0x004 +#define HPET_CFG 0x010 +#define HPET_STATUS 0x020 +#define HPET_COUNTER 0x0f0 +#define HPET_Tn_CFG(n) (0x100 + (n) * 0x20) +#define HPET_Tn_CMP(n) (0x108 + (n) * 0x20) +#define HPET_Tn_ROUTE(n) (0x110 + (n) * 0x20) + +#define HPET_ID_VENDOR 0xffff0000 +#define HPET_ID_LEGSUP 0x00008000 +#define HPET_ID_64BIT 0x00002000 +#define HPET_ID_NUMBER 0x00001f00 +#define HPET_ID_REV 0x000000ff +#define HPET_ID_NUMBER_SHIFT 8 +#define HPET_ID_VENDOR_SHIFT 16 + +#define HPET_CFG_ENABLE 0x001 +#define HPET_CFG_LEGACY 0x002 +#define HPET_LEGACY_8254 2 +#define HPET_LEGACY_RTC 8 + +#define HPET_TN_LEVEL 0x002 +#define HPET_TN_ENABLE 0x004 +#define 
HPET_TN_PERIODIC 0x008 +#define HPET_TN_PERIODIC_CAP 0x010 +#define HPET_TN_64BIT_CAP 0x020 +#define HPET_TN_SETVAL 0x040 +#define HPET_TN_32BIT 0x100 +#define HPET_TN_ROUTE 0x3e00 +#define HPET_TN_FSB 0x4000 +#define HPET_TN_FSB_CAP 0x8000 +#define HPET_TN_RESERVED 0xffff0081 +#define HPET_TN_INT_ROUTE_CAP (0xffffffffULL << 32) + + +#define hpet_read32(x) \ + (*(volatile u32 *)(fix_to_virt(FIX_HPET_BASE) + (x))) +#define hpet_write32(y,x) \ + (*(volatile u32 *)(fix_to_virt(FIX_HPET_BASE) + (x)) = (y)) + +extern unsigned long hpet_address; +extern u8 hpet_blockid; +extern u8 hpet_flags; +extern int8_t opt_hpet_legacy_replacement; + +/* + * Detect and initialise HPET hardware: return counter update frequency. + * Return value is zero if HPET is unavailable. + */ +u64 hpet_setup(void); +void hpet_resume(u32 *); + +/* + * Disable HPET hardware: restore it to boot time state. + */ +void hpet_disable(void); + +/* + * Callback from legacy timer (PIT channel 0) IRQ handler. + * Returns 1 if tick originated from HPET; else 0. + */ +int hpet_legacy_irq_tick(void); + +/* + * Try to enable HPET Legacy Replacement mode. Returns a boolean indicating + * whether the HPET configuration was changed. + */ +bool hpet_enable_legacy_replacement_mode(void); + +/* + * Undo the effects of hpet_enable_legacy_replacement_mode(). Must not be + * called unless enable() returned true. + */ +void hpet_disable_legacy_replacement_mode(void); + +/* + * Temporarily use an HPET event counter for timer interrupt handling, + * rather than using the LAPIC timer. Used for Cx state entry. + */ +void hpet_broadcast_init(void); +void hpet_broadcast_resume(void); +void hpet_broadcast_enter(void); +void hpet_broadcast_exit(void); +int hpet_broadcast_is_available(void); +void hpet_disable_legacy_broadcast(void); + +extern void (*pv_rtc_handler)(uint8_t reg, uint8_t value); + +#endif /* __X86_HPET_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/asid.h b/xen/arch/x86/include/asm/hvm/asid.h new file mode 100644 index 0000000000..633ddb72e4 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/asid.h @@ -0,0 +1,52 @@ +/* + * asid.h: ASID management + * Copyright (c) 2007, Advanced Micro Devices, Inc. + * Copyright (c) 2009, Citrix Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_X86_HVM_ASID_H__ +#define __ASM_X86_HVM_ASID_H__ + + +struct vcpu; +struct hvm_vcpu_asid; + +/* Initialise ASID management for the current physical CPU. */ +void hvm_asid_init(int nasids); + +/* Invalidate a particular ASID allocation: forces re-allocation. */ +void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid); + +/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */ +void hvm_asid_flush_vcpu(struct vcpu *v); + +/* Flush all ASIDs on this processor core. */ +void hvm_asid_flush_core(void); + +/* Called before entry to guest context. Checks ASID allocation, returns a + * boolean indicating whether all ASIDs must be flushed.
*/ +bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid); + +#endif /* __ASM_X86_HVM_ASID_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/cacheattr.h b/xen/arch/x86/include/asm/hvm/cacheattr.h new file mode 100644 index 0000000000..79e721d074 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/cacheattr.h @@ -0,0 +1,23 @@ +#ifndef __HVM_CACHEATTR_H__ +#define __HVM_CACHEATTR_H__ + +#include + +struct domain; +void hvm_init_cacheattr_region_list(struct domain *d); +void hvm_destroy_cacheattr_region_list(struct domain *d); + +/* + * Check whether gfn is in the pinned range: + * if yes, return the (non-negative) type + * if no or ambiguous, return a negative error code + */ +int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn, + unsigned int order); + + +/* Set pinned caching type for a domain. */ +int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start, + uint64_t gfn_end, uint32_t type); + +#endif /* __HVM_CACHEATTR_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h new file mode 100644 index 0000000000..698455444e --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/domain.h @@ -0,0 +1,173 @@ +/* + * domain.h: HVM per domain definitions + * + * Copyright (c) 2004, Intel Corporation. + * Copyright (c) 2005, International Business Machines Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_X86_HVM_DOMAIN_H__ +#define __ASM_X86_HVM_DOMAIN_H__ + +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_MEM_SHARING +struct mem_sharing_domain +{ + bool enabled, block_interrupts; + + /* + * When releasing shared gfn's in a preemptible manner, recall where + * to resume the search. + */ + unsigned long next_shared_gfn_to_relinquish; +}; +#endif + +/* + * This structure defines function hooks to support hardware-assisted + * virtual interrupt delivery to the guest (e.g. VMX PI and SVM AVIC). + * + * These hooks are defined by the underlying arch-specific code + * as needed. For example: + * - When the domain is enabled with virtual IPI delivery + * - When the domain is enabled with virtual I/O int delivery + * and actually has a physical device assigned. + */ +struct hvm_pi_ops { + unsigned int flags; + + /* + * Hook into arch_vcpu_block(), which is called + * from vcpu_block() and vcpu_do_poll(). + */ + void (*vcpu_block)(struct vcpu *); +}; + +struct hvm_domain { + /* Guest page range used for non-default ioreq servers */ + struct { + unsigned long base; + unsigned long mask; /* indexed by GFN minus base */ + unsigned long legacy_mask; /* indexed by HVM param number */ + } ioreq_gfn; + + /* Cached CF8 for guest PCI config cycles */ + uint32_t pci_cf8; + + struct pl_time *pl_time; + + struct hvm_io_handler *io_handler; + unsigned int io_handler_count; + + /* Lock protects access to irq, vpic and vioapic.
*/ + spinlock_t irq_lock; + struct hvm_irq *irq; + struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */ + struct hvm_vioapic **vioapic; + unsigned int nr_vioapics; + struct hvm_hw_stdvga stdvga; + + /* + * hvm_hw_pmtimer is a publicly-visible name. We will defer renaming + * it to the more appropriate hvm_hw_acpi until the expected + * comprehensive rewrite of migration code, thus avoiding code churn + * in public header files. + * Internally, however, we will be using hvm_hw_acpi. + */ +#define hvm_hw_acpi hvm_hw_pmtimer + struct hvm_hw_acpi acpi; + + /* VCPU which is current target for 8259 interrupts. */ + struct vcpu *i8259_target; + + /* emulated irq to pirq */ + struct radix_tree_root emuirq_pirq; + + uint64_t *params; + + /* Memory ranges with pinned cache attributes. */ + struct list_head pinned_cacheattr_ranges; + + /* VRAM dirty support. Protect with the domain paging lock. */ + struct sh_dirty_vram *dirty_vram; + + /* If one of the vcpus of this domain is in no_fill_mode or + * mtrr/pat between vcpus is not the same, set is_in_uc_mode + */ + spinlock_t uc_lock; + bool is_in_uc_mode; + + bool is_s3_suspended; + + /* hypervisor intercepted msix table */ + struct list_head msixtbl_list; + + struct viridian_domain *viridian; + + /* + * TSC value that VCPUs use to calculate their tsc_offset value. + * Used during initialization and save/restore. + */ + uint64_t sync_tsc; + + uint64_t tsc_scaling_ratio; + + unsigned long *io_bitmap; + + /* List of guest to machine IO ports mapping. */ + struct list_head g2m_ioport_list; + + /* List of MMCFG regions trapped by Xen. */ + struct list_head mmcfg_regions; + rwlock_t mmcfg_lock; + + /* List of MSI-X tables. */ + struct list_head msix_tables; + + /* List of permanently write-mapped pages. */ + struct { + spinlock_t lock; + struct list_head list; + } write_map; + + struct hvm_pi_ops pi_ops; + + union { + struct vmx_domain vmx; + struct svm_domain svm; + }; + +#ifdef CONFIG_MEM_SHARING + struct mem_sharing_domain mem_sharing; +#endif +}; + +#endif /* __ASM_X86_HVM_DOMAIN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/emulate.h b/xen/arch/x86/include/asm/hvm/emulate.h new file mode 100644 index 0000000000..e670040603 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/emulate.h @@ -0,0 +1,156 @@ +/****************************************************************************** + * hvm/emulate.h + * + * HVM instruction emulation. Used for MMIO and VMX real mode. + * + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Authors: + * Keir Fraser + */ + +#ifndef __ASM_X86_HVM_EMULATE_H__ +#define __ASM_X86_HVM_EMULATE_H__ + +#include +#include +#include +#include +#include + +typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state, + const struct x86_emulate_ctxt *ctxt); + +struct hvm_emulate_ctxt { + struct x86_emulate_ctxt ctxt; + + /* + * validate: Post-decode, pre-emulate hook to allow caller controlled + * filtering. + */ + hvm_emulate_validate_t *validate; + + /* Cache of 16 bytes of instruction. */ + uint8_t insn_buf[16]; + unsigned long insn_buf_eip; + unsigned int insn_buf_bytes; + + struct segment_register seg_reg[10]; + unsigned long seg_reg_accessed; + unsigned long seg_reg_dirty; + + /* + * MFNs behind temporary mappings in the write callback. The length is + * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are + * needed.
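+ * (An access of up to PAGE_SIZE+1 bytes can span at most two pages, which + * is why two MFN slots suffice here.)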
+ */ + mfn_t mfn[2]; + + uint32_t intr_shadow; + + bool is_mem_access; + + bool_t set_context; +}; + +enum emul_kind { + EMUL_KIND_NORMAL, + EMUL_KIND_NOWRITE, + EMUL_KIND_SET_CONTEXT_DATA, + EMUL_KIND_SET_CONTEXT_INSN +}; + +bool __nonnull(1, 2) hvm_emulate_one_insn( + hvm_emulate_validate_t *validate, + const char *descr); +int hvm_emulate_one( + struct hvm_emulate_ctxt *hvmemul_ctxt, + enum vio_completion completion); +void hvm_emulate_one_vm_event(enum emul_kind kind, + unsigned int trapnr, + unsigned int errcode); +/* Must be called once to set up hvmemul state. */ +void hvm_emulate_init_once( + struct hvm_emulate_ctxt *hvmemul_ctxt, + hvm_emulate_validate_t *validate, + struct cpu_user_regs *regs); +/* Must be called once before each instruction emulated. */ +void hvm_emulate_init_per_insn( + struct hvm_emulate_ctxt *hvmemul_ctxt, + const unsigned char *insn_buf, + unsigned int insn_bytes); +void hvm_emulate_writeback( + struct hvm_emulate_ctxt *hvmemul_ctxt); +void hvmemul_cancel(struct vcpu *v); +struct segment_register *hvmemul_get_seg_reg( + enum x86_segment seg, + struct hvm_emulate_ctxt *hvmemul_ctxt); +int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla); + +static inline bool handle_mmio(void) +{ + return hvm_emulate_one_insn(x86_insn_is_mem_access, "MMIO"); +} + +int hvmemul_insn_fetch(unsigned long offset, + void *p_data, + unsigned int bytes, + struct x86_emulate_ctxt *ctxt); +int hvmemul_do_pio_buffer(uint16_t port, + unsigned int size, + uint8_t dir, + void *buffer); + +#ifdef CONFIG_HVM +/* + * The cache controlled by the functions below is not like an ordinary CPU + * cache, i.e. aiming to help performance, but a "secret store" which is + * needed for correctness. The issue it helps addressing is the need for + * re-execution of an insn (after data was provided by a device model) to + * observe the exact same memory state, i.e. to specifically not observe any + * updates which may have occurred in the meantime by other agents. + * Therefore this cache gets + * - enabled when emulation of an insn starts, + * - disabled across processing secondary things like a hypercall resulting + * from insn emulation, + * - disabled again when an emulated insn is known to not require any + * further re-execution. 
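+ * + * A (hypothetical) caller bracketing such secondary processing might do: + * + * unsigned int token = hvmemul_cache_disable(v); + * + * ... process the hypercall ... + * + * hvmemul_cache_restore(v, token); + * + * (Sketch only, derived from the declarations below.)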
+ */ +int __must_check hvmemul_cache_init(struct vcpu *v); +static inline void hvmemul_cache_destroy(struct vcpu *v) +{ + XFREE(v->arch.hvm.hvm_io.cache); +} +bool hvmemul_read_cache(const struct vcpu *, paddr_t gpa, + void *buffer, unsigned int size); +void hvmemul_write_cache(const struct vcpu *, paddr_t gpa, + const void *buffer, unsigned int size); +unsigned int hvmemul_cache_disable(struct vcpu *); +void hvmemul_cache_restore(struct vcpu *, unsigned int token); +/* For use in ASSERT()s only: */ +static inline bool hvmemul_cache_disabled(struct vcpu *v) +{ + return hvmemul_cache_disable(v) == hvmemul_cache_disable(v); +} +#else +static inline bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa, + void *buf, + unsigned int size) { return false; } +static inline void hvmemul_write_cache(const struct vcpu *v, paddr_t gpa, + const void *buf, unsigned int size) {} +#endif + +void hvm_dump_emulation_state(const char *loglvl, const char *prefix, + struct hvm_emulate_ctxt *hvmemul_ctxt, int rc); + +#endif /* __ASM_X86_HVM_EMULATE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/grant_table.h b/xen/arch/x86/include/asm/hvm/grant_table.h new file mode 100644 index 0000000000..a5612585b3 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/grant_table.h @@ -0,0 +1,61 @@ +/* + * asm-x86/hvm/grant_table.h + * + * Grant table interfaces for HVM guests + * + * Copyright (C) 2017 Wei Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . 
+ */ + +#ifndef __X86_HVM_GRANT_TABLE_H__ +#define __X86_HVM_GRANT_TABLE_H__ + +#ifdef CONFIG_HVM + +int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, + unsigned int flags, + unsigned int cache_flags); +int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, + uint64_t new_addr, unsigned int flags); + +#else + +#include + +static inline int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, + unsigned int flags, + unsigned int cache_flags) +{ + return GNTST_general_error; +} + +static inline int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, + uint64_t new_addr, unsigned int flags) +{ + return GNTST_general_error; +} + +#endif + +#endif /* __X86_HVM_GRANT_TABLE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/guest_access.h b/xen/arch/x86/include/asm/hvm/guest_access.h new file mode 100644 index 0000000000..edacba75db --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/guest_access.h @@ -0,0 +1,8 @@ +#ifndef __ASM_X86_HVM_GUEST_ACCESS_H__ +#define __ASM_X86_HVM_GUEST_ACCESS_H__ + +unsigned int copy_to_user_hvm(void *to, const void *from, unsigned int len); +unsigned int clear_user_hvm(void *to, unsigned int len); +unsigned int copy_from_user_hvm(void *to, const void *from, unsigned int len); + +#endif /* __ASM_X86_HVM_GUEST_ACCESS_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h new file mode 100644 index 0000000000..bd2cbb0e7b --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/hvm.h @@ -0,0 +1,886 @@ +/* + * hvm.h: Hardware virtual machine assist interface definitions. + * + * Leendert van Doorn, leendert@watson.ibm.com + * Copyright (c) 2005, International Business Machines Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_HVM_HVM_H__ +#define __ASM_X86_HVM_HVM_H__ + +#include +#include +#include +#include +#include + +#ifdef CONFIG_HVM_FEP +/* Permit use of the Forced Emulation Prefix in HVM guests */ +extern bool_t opt_hvm_fep; +#else +#define opt_hvm_fep 0 +#endif + +/* Interrupt acknowledgement sources. 
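Values are packaged into a struct hvm_intack by the helpers below; e.g. hvm_intack_lapic(0x30) builds { hvm_intsrc_lapic, 0x30 }.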
*/ +enum hvm_intsrc { + hvm_intsrc_none, + hvm_intsrc_pic, + hvm_intsrc_lapic, + hvm_intsrc_nmi, + hvm_intsrc_mce, + hvm_intsrc_vector +}; +struct hvm_intack { + uint8_t source; /* enum hvm_intsrc */ + uint8_t vector; +}; +#define hvm_intack(src, vec) ((struct hvm_intack) { hvm_intsrc_##src, vec }) +#define hvm_intack_none hvm_intack(none, 0) +#define hvm_intack_pic(vec) hvm_intack(pic, vec) +#define hvm_intack_lapic(vec) hvm_intack(lapic, vec) +#define hvm_intack_nmi hvm_intack(nmi, 2) +#define hvm_intack_mce hvm_intack(mce, 18) +#define hvm_intack_vector(vec) hvm_intack(vector, vec) +enum hvm_intblk { + hvm_intblk_none, /* not blocked (deliverable) */ + hvm_intblk_shadow, /* MOV-SS or STI shadow */ + hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */ + hvm_intblk_tpr, /* LAPIC TPR too high */ + hvm_intblk_nmi_iret, /* NMI blocked until IRET */ + hvm_intblk_arch, /* SVM/VMX specific reason */ +}; + +/* These happen to be the same as the VMX interrupt shadow definitions. */ +#define HVM_INTR_SHADOW_STI 0x00000001 +#define HVM_INTR_SHADOW_MOV_SS 0x00000002 +#define HVM_INTR_SHADOW_SMI 0x00000004 +#define HVM_INTR_SHADOW_NMI 0x00000008 + +/* + * HAP super page capabilities: + * bit0: if 2MB super page is allowed? + * bit1: if 1GB super page is allowed? + */ +#define HVM_HAP_SUPERPAGE_2MB 0x00000001 +#define HVM_HAP_SUPERPAGE_1GB 0x00000002 + +#define HVM_EVENT_VECTOR_UNSET (-1) +#define HVM_EVENT_VECTOR_UPDATING (-2) + +/* update_guest_cr() flags. */ +#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001 + +/* + * The hardware virtual machine (HVM) interface abstracts away from the + * x86/x86_64 CPU virtualization assist specifics. Currently this interface + * supports Intel's VT-x and AMD's SVM extensions. + */ +struct hvm_function_table { + char *name; + + /* Support Hardware-Assisted Paging? */ + bool_t hap_supported; + + /* Necessary hardware support for alternate p2m's? */ + bool altp2m_supported; + + /* Hardware virtual interrupt delivery enable? */ + bool virtual_intr_delivery_enabled; + + /* Indicate HAP capabilities. */ + unsigned int hap_capabilities; + + /* + * Initialise/destroy HVM domain/vcpu resources + */ + int (*domain_initialise)(struct domain *d); + void (*domain_creation_finished)(struct domain *d); + void (*domain_relinquish_resources)(struct domain *d); + void (*domain_destroy)(struct domain *d); + int (*vcpu_initialise)(struct vcpu *v); + void (*vcpu_destroy)(struct vcpu *v); + + /* save and load hvm guest cpu context for save/restore */ + void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); + int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); + + /* Examine specifics of the guest state. */ + unsigned int (*get_interrupt_shadow)(struct vcpu *v); + void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow); + int (*guest_x86_mode)(struct vcpu *v); + unsigned int (*get_cpl)(struct vcpu *v); + void (*get_segment_register)(struct vcpu *v, enum x86_segment seg, + struct segment_register *reg); + void (*set_segment_register)(struct vcpu *v, enum x86_segment seg, + struct segment_register *reg); + unsigned long (*get_shadow_gs_base)(struct vcpu *v); + + /* + * Re-set the value of CR3 that Xen runs on when handling VM exits. + */ + void (*update_host_cr3)(struct vcpu *v); + + /* + * Called to inform HVM layer that a guest CRn or EFER has changed. 
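+ * For example, hvm_update_guest_cr3() further down funnels into this + * hook as update_guest_cr(v, 3, flags).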
+ */ + void (*update_guest_cr)(struct vcpu *v, unsigned int cr, + unsigned int flags); + void (*update_guest_efer)(struct vcpu *v); + + void (*cpuid_policy_changed)(struct vcpu *v); + + void (*fpu_leave)(struct vcpu *v); + + int (*get_guest_pat)(struct vcpu *v, u64 *); + int (*set_guest_pat)(struct vcpu *v, u64); + + bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *); + bool (*set_guest_bndcfgs)(struct vcpu *v, u64); + + void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc); + + void (*inject_event)(const struct x86_event *event); + + void (*init_hypercall_page)(void *ptr); + + bool (*event_pending)(const struct vcpu *v); + bool (*get_pending_event)(struct vcpu *v, struct x86_event *info); + void (*invlpg)(struct vcpu *v, unsigned long linear); + + int (*cpu_up_prepare)(unsigned int cpu); + void (*cpu_dead)(unsigned int cpu); + + int (*cpu_up)(void); + void (*cpu_down)(void); + + /* Copy up to 15 bytes from cached instruction bytes at current rIP. */ + unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf); + + /* Instruction intercepts: non-void return values are X86EMUL codes. */ + void (*wbinvd_intercept)(void); + void (*fpu_dirty_intercept)(void); + int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content); + int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content); + void (*handle_cd)(struct vcpu *v, unsigned long value); + void (*set_info_guest)(struct vcpu *v); + void (*set_rdtsc_exiting)(struct vcpu *v, bool_t); + void (*set_descriptor_access_exiting)(struct vcpu *v, bool); + + /* Nested HVM */ + int (*nhvm_vcpu_initialise)(struct vcpu *v); + void (*nhvm_vcpu_destroy)(struct vcpu *v); + int (*nhvm_vcpu_reset)(struct vcpu *v); + int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event); + uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v); + bool_t (*nhvm_vmcx_guest_intercepts_event)( + struct vcpu *v, unsigned int vector, int errcode); + + bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v); + + enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v); + void (*nhvm_domain_relinquish_resources)(struct domain *d); + + /* Virtual interrupt delivery */ + void (*update_eoi_exit_bitmap)(struct vcpu *v, uint8_t vector, bool set); + void (*process_isr)(int isr, struct vcpu *v); + void (*deliver_posted_intr)(struct vcpu *v, u8 vector); + void (*sync_pir_to_irr)(struct vcpu *v); + bool (*test_pir)(const struct vcpu *v, uint8_t vector); + void (*handle_eoi)(uint8_t vector, int isr); + + /*Walk nested p2m */ + int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa, + paddr_t *L1_gpa, unsigned int *page_order, + uint8_t *p2m_acc, bool_t access_r, + bool_t access_w, bool_t access_x); + + void (*enable_msr_interception)(struct domain *d, uint32_t msr); + bool_t (*is_singlestep_supported)(void); + + /* Alternate p2m */ + void (*altp2m_vcpu_update_p2m)(struct vcpu *v); + void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v); + bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v); + int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs); + + /* vmtrace */ + int (*vmtrace_control)(struct vcpu *v, bool enable, bool reset); + int (*vmtrace_output_position)(struct vcpu *v, uint64_t *pos); + int (*vmtrace_set_option)(struct vcpu *v, uint64_t key, uint64_t value); + int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value); + int (*vmtrace_reset)(struct vcpu *v); + + /* + * Parameters and callbacks for hardware-assisted TSC scaling, + * which are valid only when the hardware feature is available. 
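+ * + * The ratio is a fixed-point value with ratio_frac_bits fractional bits, + * i.e. conceptually: + * + * scaled_tsc = (tsc * ratio) >> ratio_frac_bits + * + * (See hvm_scale_tsc() below for the implemented arithmetic.)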
+ */ + struct { + /* number of bits of the fractional part of TSC scaling ratio */ + uint8_t ratio_frac_bits; + /* maximum-allowed TSC scaling ratio */ + uint64_t max_ratio; + + /* Architecture function to setup TSC scaling ratio */ + void (*setup)(struct vcpu *v); + } tsc_scaling; +}; + +extern struct hvm_function_table hvm_funcs; +extern bool_t hvm_enabled; +extern s8 hvm_port80_allowed; + +extern const struct hvm_function_table *start_svm(void); +extern const struct hvm_function_table *start_vmx(void); + +int hvm_domain_initialise(struct domain *d); +void hvm_domain_relinquish_resources(struct domain *d); +void hvm_domain_destroy(struct domain *d); + +int hvm_vcpu_initialise(struct vcpu *v); +void hvm_vcpu_destroy(struct vcpu *v); +void hvm_vcpu_down(struct vcpu *v); +int hvm_vcpu_cacheattr_init(struct vcpu *v); +void hvm_vcpu_cacheattr_destroy(struct vcpu *v); +void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip); + +void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat); +int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat); + +u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc); + +u64 hvm_scale_tsc(const struct domain *d, u64 tsc); +u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz); + +void hvm_init_guest_time(struct domain *d); +void hvm_set_guest_time(struct vcpu *v, u64 guest_time); +uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc); + +int vmsi_deliver( + struct domain *d, int vector, + uint8_t dest, uint8_t dest_mode, + uint8_t delivery_mode, uint8_t trig_mode); +struct hvm_pirq_dpci; +void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *); +int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode); + +enum hvm_intblk +hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack); + +void hvm_init_hypercall_page(struct domain *d, void *ptr); + +void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg, + struct segment_register *reg); +void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, + struct segment_register *reg); + +void hvm_set_info_guest(struct vcpu *v); + +bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val); + +int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len); +void hvm_migrate_timers(struct vcpu *v); +void hvm_do_resume(struct vcpu *v); +void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v); +void hvm_migrate_pirqs(struct vcpu *v); + +void hvm_inject_event(const struct x86_event *event); + +int hvm_event_needs_reinjection(uint8_t type, uint8_t vector); + +uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2); + +void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable); + +enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int }; +void hvm_task_switch( + uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason, + int32_t errcode, unsigned int insn_len, unsigned int extra_eflags); + +enum hvm_access_type { + hvm_access_insn_fetch, + hvm_access_none, + hvm_access_read, + hvm_access_write +}; + +bool hvm_vcpu_virtual_to_linear( + struct vcpu *v, + enum x86_segment seg, + const struct segment_register *reg, + unsigned long offset, + unsigned int bytes, + enum hvm_access_type access_type, + const struct segment_register *active_cs, + unsigned long *linear_addr); + +static inline bool hvm_virtual_to_linear_addr( + enum x86_segment seg, + const struct segment_register *reg, + unsigned long offset, + unsigned int bytes, + enum hvm_access_type access_type, + const struct segment_register *active_cs, + 
unsigned long *linear) +{ + return hvm_vcpu_virtual_to_linear(current, seg, reg, offset, bytes, + access_type, active_cs, linear); +} + +void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent, + bool_t *writable); +void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent); +void hvm_unmap_guest_frame(void *p, bool_t permanent); +void hvm_mapped_guest_frames_mark_dirty(struct domain *); + +int hvm_debug_op(struct vcpu *v, int32_t op); + +/* Caller should pause vcpu before calling this function */ +void hvm_toggle_singlestep(struct vcpu *v); +void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx); + +struct npfec; +int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, + struct npfec npfec); + +/* Check CR4/EFER values */ +const char *hvm_efer_valid(const struct vcpu *v, uint64_t value, + signed int cr0_pg); +unsigned long hvm_cr4_guest_valid_bits(const struct domain *d); + +int hvm_copy_context_and_params(struct domain *src, struct domain *dst); + +int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value); + +#ifdef CONFIG_HVM + +#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0) + +#define hvm_tsc_scaling_supported \ + (!!hvm_funcs.tsc_scaling.ratio_frac_bits) + +#define hvm_default_tsc_scaling_ratio \ + (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits) + +#define hvm_tsc_scaling_ratio(d) \ + ((d)->arch.hvm.tsc_scaling_ratio) + +#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0) + +#define hvm_paging_enabled(v) \ + (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG)) +#define hvm_wp_enabled(v) \ + (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP)) +#define hvm_pcid_enabled(v) \ + (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE)) +#define hvm_pae_enabled(v) \ + (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE)) +#define hvm_smep_enabled(v) \ + (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP)) +#define hvm_smap_enabled(v) \ + (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP)) +#define hvm_nx_enabled(v) \ + ((v)->arch.hvm.guest_efer & EFER_NXE) +#define hvm_pku_enabled(v) \ + (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE)) + +/* Can we use superpages in the HAP p2m table? */ +#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB)) +#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB)) + +#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA)) + +static inline bool hvm_has_set_descriptor_access_exiting(void) +{ + return hvm_funcs.set_descriptor_access_exiting; +} + +static inline void hvm_domain_creation_finished(struct domain *d) +{ + if ( hvm_funcs.domain_creation_finished ) + alternative_vcall(hvm_funcs.domain_creation_finished, d); +} + +static inline int +hvm_guest_x86_mode(struct vcpu *v) +{ + ASSERT(v == current); + return alternative_call(hvm_funcs.guest_x86_mode, v); +} + +static inline void +hvm_update_host_cr3(struct vcpu *v) +{ + if ( hvm_funcs.update_host_cr3 ) + alternative_vcall(hvm_funcs.update_host_cr3, v); +} + +static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr) +{ + alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0); +} + +static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush) +{ + unsigned int flags = noflush ? 
HVM_UPDATE_GUEST_CR3_NOFLUSH : 0; + + alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags); +} + +static inline void hvm_update_guest_efer(struct vcpu *v) +{ + alternative_vcall(hvm_funcs.update_guest_efer, v); +} + +static inline void hvm_cpuid_policy_changed(struct vcpu *v) +{ + alternative_vcall(hvm_funcs.cpuid_policy_changed, v); +} + +static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, + uint64_t at_tsc) +{ + alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc); +} + +/* + * Called to ensure that all guest-specific mappings in a tagged TLB are + * flushed; does *not* flush Xen's TLB entries, and on processors without a + * tagged TLB it will be a noop. + */ +static inline void hvm_flush_guest_tlbs(void) +{ + if ( hvm_enabled ) + hvm_asid_flush_core(); +} + +static inline unsigned int +hvm_get_cpl(struct vcpu *v) +{ + return alternative_call(hvm_funcs.get_cpl, v); +} + +static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v) +{ + return alternative_call(hvm_funcs.get_shadow_gs_base, v); +} + +static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val) +{ + return hvm_funcs.get_guest_bndcfgs && + alternative_call(hvm_funcs.get_guest_bndcfgs, v, val); +} + +#define has_hvm_params(d) \ + ((d)->arch.hvm.params != NULL) + +#define viridian_feature_mask(d) \ + (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0) + +#define is_viridian_domain(d) \ + (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq)) + +#define is_viridian_vcpu(v) \ + is_viridian_domain((v)->domain) + +#define has_viridian_time_ref_count(d) \ + (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count)) + +#define has_viridian_apic_assist(d) \ + (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist)) + +#define has_viridian_synic(d) \ + (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic)) + +static inline void hvm_inject_exception( + unsigned int vector, unsigned int type, + unsigned int insn_len, int error_code) +{ + struct x86_event event = { + .vector = vector, + .type = type, + .insn_len = insn_len, + .error_code = error_code, + }; + + hvm_inject_event(&event); +} + +static inline void hvm_inject_hw_exception(unsigned int vector, int errcode) +{ + struct x86_event event = { + .vector = vector, + .type = X86_EVENTTYPE_HW_EXCEPTION, + .error_code = errcode, + }; + + hvm_inject_event(&event); +} + +static inline void hvm_inject_page_fault(int errcode, unsigned long cr2) +{ + struct x86_event event = { + .vector = TRAP_page_fault, + .type = X86_EVENTTYPE_HW_EXCEPTION, + .error_code = errcode, + .cr2 = cr2, + }; + + hvm_inject_event(&event); +} + +static inline bool hvm_event_pending(const struct vcpu *v) +{ + return alternative_call(hvm_funcs.event_pending, v); +} + +static inline void hvm_invlpg(struct vcpu *v, unsigned long linear) +{ + alternative_vcall(hvm_funcs.invlpg, v, linear); +} + +/* These bits in CR4 are owned by the host. */ +#define HVM_CR4_HOST_MASK (mmu_cr4_features & \ + (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE)) + +/* These exceptions must always be intercepted. */ +#define HVM_TRAP_MASK ((1U << TRAP_debug) | \ + (1U << TRAP_alignment_check) | \ + (1U << TRAP_machine_check)) + +static inline int hvm_cpu_up(void) +{ + return (hvm_funcs.cpu_up ?
hvm_funcs.cpu_up() : 0); +} + +static inline void hvm_cpu_down(void) +{ + if ( hvm_funcs.cpu_down ) + hvm_funcs.cpu_down(); +} + +static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf) +{ + return (hvm_funcs.get_insn_bytes + ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0); +} + +static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs) +{ +#ifndef NDEBUG + regs->error_code = 0xbeef; + regs->entry_vector = 0xbeef; + regs->saved_upcall_mask = 0xbf; + regs->cs = 0xbeef; + regs->ss = 0xbeef; + regs->ds = 0xbeef; + regs->es = 0xbeef; + regs->fs = 0xbeef; + regs->gs = 0xbeef; +#endif +} + +/* + * Nested HVM + */ + +/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to + * 'trapnr' exception. + */ +static inline int nhvm_vcpu_vmexit_event( + struct vcpu *v, const struct x86_event *event) +{ + return hvm_funcs.nhvm_vcpu_vmexit_event(v, event); +} + +/* returns l1 guest's cr3 that points to the page table used to + * translate l2 guest physical address to l1 guest physical address. + */ +static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v) +{ + return hvm_funcs.nhvm_vcpu_p2m_base(v); +} + +/* returns true, when l1 guest intercepts the specified trap */ +static inline bool_t nhvm_vmcx_guest_intercepts_event( + struct vcpu *v, unsigned int vector, int errcode) +{ + return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode); +} + +/* returns true when l1 guest wants to use hap to run l2 guest */ +static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v) +{ + return hvm_funcs.nhvm_vmcx_hap_enabled(v); +} + +/* interrupt */ +static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v) +{ + return hvm_funcs.nhvm_intr_blocked(v); +} + +static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr) +{ + if ( hvm_funcs.enable_msr_interception ) + { + hvm_funcs.enable_msr_interception(d, msr); + return 1; + } + + return 0; +} + +static inline bool_t hvm_is_singlestep_supported(void) +{ + return (hvm_funcs.is_singlestep_supported && + hvm_funcs.is_singlestep_supported()); +} + +static inline bool hvm_hap_supported(void) +{ + return hvm_funcs.hap_supported; +} + +/* returns true if hardware supports alternate p2m's */ +static inline bool hvm_altp2m_supported(void) +{ + return hvm_funcs.altp2m_supported; +} + +/* updates the current hardware p2m */ +static inline void altp2m_vcpu_update_p2m(struct vcpu *v) +{ + if ( hvm_funcs.altp2m_vcpu_update_p2m ) + hvm_funcs.altp2m_vcpu_update_p2m(v); +} + +/* updates VMCS fields related to VMFUNC and #VE */ +static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v) +{ + if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve ) + hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v); +} + +/* emulates #VE */ +static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v) +{ + if ( hvm_funcs.altp2m_vcpu_emulate_ve ) + { + hvm_funcs.altp2m_vcpu_emulate_ve(v); + return true; + } + return false; +} + +static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset) +{ + if ( hvm_funcs.vmtrace_control ) + return hvm_funcs.vmtrace_control(v, enable, reset); + + return -EOPNOTSUPP; +} + +/* Returns -errno, or a boolean of whether tracing is currently active. 
*/ +static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos) +{ + if ( hvm_funcs.vmtrace_output_position ) + return hvm_funcs.vmtrace_output_position(v, pos); + + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_set_option( + struct vcpu *v, uint64_t key, uint64_t value) +{ + if ( hvm_funcs.vmtrace_set_option ) + return hvm_funcs.vmtrace_set_option(v, key, value); + + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_get_option( + struct vcpu *v, uint64_t key, uint64_t *value) +{ + if ( hvm_funcs.vmtrace_get_option ) + return hvm_funcs.vmtrace_get_option(v, key, value); + + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_reset(struct vcpu *v) +{ + if ( hvm_funcs.vmtrace_reset ) + return hvm_funcs.vmtrace_reset(v); + + return -EOPNOTSUPP; +} + +/* + * This must be defined as a macro instead of an inline function, + * because it uses 'struct vcpu' and 'struct domain' which have + * not been defined yet. + */ +#define arch_vcpu_block(v) ({ \ + struct vcpu *v_ = (v); \ + struct domain *d_ = v_->domain; \ + if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block ) \ + d_->arch.hvm.pi_ops.vcpu_block(v_); \ +}) + +#else /* CONFIG_HVM */ + +#define hvm_enabled false + +/* + * List of inline functions above, of which only declarations are + * needed because DCE will kick in. + */ +int hvm_guest_x86_mode(struct vcpu *v); +unsigned long hvm_get_shadow_gs_base(struct vcpu *v); +void hvm_cpuid_policy_changed(struct vcpu *v); +void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc); +bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val); + +/* End of prototype list */ + +/* Called by code in other header */ +static inline bool hvm_is_singlestep_supported(void) +{ + return false; +} + +static inline bool hvm_hap_supported(void) +{ + return false; +} + +static inline bool nhvm_vmcx_hap_enabled(const struct vcpu *v) +{ + ASSERT_UNREACHABLE(); + return false; +} + + +/* Called by common code */ +static inline int hvm_cpu_up(void) +{ + return 0; +} + +static inline void hvm_cpu_down(void) {} + +static inline void hvm_flush_guest_tlbs(void) {} + +static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear) +{ + ASSERT_UNREACHABLE(); +} + +static inline void hvm_domain_creation_finished(struct domain *d) +{ + ASSERT_UNREACHABLE(); +} + +/* + * Shadow code needs further cleanup to eliminate some HVM-only paths. For + * now provide the stubs here but assert they will never be reached. 
+ */ +static inline void hvm_update_host_cr3(const struct vcpu *v) +{ + ASSERT_UNREACHABLE(); +} + +static inline void hvm_update_guest_cr3(const struct vcpu *v, bool noflush) +{ + ASSERT_UNREACHABLE(); +} + +static inline unsigned int hvm_get_cpl(const struct vcpu *v) +{ + ASSERT_UNREACHABLE(); + return -1; +} + +static inline bool hvm_event_pending(const struct vcpu *v) +{ + return false; +} + +static inline void hvm_inject_hw_exception(unsigned int vector, int errcode) +{ + ASSERT_UNREACHABLE(); +} + +static inline bool hvm_has_set_descriptor_access_exiting(void) +{ + return false; +} + +static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset) +{ + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos) +{ + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_set_option( + struct vcpu *v, uint64_t key, uint64_t value) +{ + return -EOPNOTSUPP; +} + +static inline int hvm_vmtrace_get_option( + struct vcpu *v, uint64_t key, uint64_t *value) +{ + return -EOPNOTSUPP; +} + +#define is_viridian_domain(d) ((void)(d), false) +#define is_viridian_vcpu(v) ((void)(v), false) +#define has_viridian_time_ref_count(d) ((void)(d), false) +#define hvm_long_mode_active(v) ((void)(v), false) +#define hvm_get_guest_time(v) ((void)(v), 0) + +#define hvm_tsc_scaling_supported false +#define hap_has_1gb false +#define hap_has_2mb false + +#define hvm_paging_enabled(v) ((void)(v), false) +#define hvm_wp_enabled(v) ((void)(v), false) +#define hvm_pcid_enabled(v) ((void)(v), false) +#define hvm_pae_enabled(v) ((void)(v), false) +#define hvm_smep_enabled(v) ((void)(v), false) +#define hvm_smap_enabled(v) ((void)(v), false) +#define hvm_nx_enabled(v) ((void)(v), false) +#define hvm_pku_enabled(v) ((void)(v), false) + +#define arch_vcpu_block(v) ((void)(v)) + +#endif /* CONFIG_HVM */ + +#endif /* __ASM_X86_HVM_HVM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/io.h b/xen/arch/x86/include/asm/hvm/io.h new file mode 100644 index 0000000000..54e0161b49 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/io.h @@ -0,0 +1,181 @@ +/* + * io.h: HVM IO support + * + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+ */ + +#ifndef __ASM_X86_HVM_IO_H__ +#define __ASM_X86_HVM_IO_H__ + +#include +#include + +#define NR_IO_HANDLERS 32 + +typedef int (*hvm_mmio_read_t)(struct vcpu *v, + unsigned long addr, + unsigned int length, + unsigned long *val); +typedef int (*hvm_mmio_write_t)(struct vcpu *v, + unsigned long addr, + unsigned int length, + unsigned long val); +typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr); + +struct hvm_mmio_ops { + hvm_mmio_check_t check; + hvm_mmio_read_t read; + hvm_mmio_write_t write; +}; + +typedef int (*portio_action_t)( + int dir, unsigned int port, unsigned int bytes, uint32_t *val); + +struct hvm_io_handler { + union { + struct { + const struct hvm_mmio_ops *ops; + } mmio; + struct { + unsigned int port, size; + portio_action_t action; + } portio; + }; + const struct hvm_io_ops *ops; + uint8_t type; +}; + +typedef int (*hvm_io_read_t)(const struct hvm_io_handler *, + uint64_t addr, + uint32_t size, + uint64_t *data); +typedef int (*hvm_io_write_t)(const struct hvm_io_handler *, + uint64_t addr, + uint32_t size, + uint64_t data); +typedef bool_t (*hvm_io_accept_t)(const struct hvm_io_handler *, + const ioreq_t *p); +typedef void (*hvm_io_complete_t)(const struct hvm_io_handler *); + +struct hvm_io_ops { + hvm_io_accept_t accept; + hvm_io_read_t read; + hvm_io_write_t write; + hvm_io_complete_t complete; +}; + +int hvm_process_io_intercept(const struct hvm_io_handler *handler, + ioreq_t *p); + +int hvm_io_intercept(ioreq_t *p); + +struct hvm_io_handler *hvm_next_io_handler(struct domain *d); + +bool_t hvm_mmio_internal(paddr_t gpa); + +void register_mmio_handler(struct domain *d, + const struct hvm_mmio_ops *ops); + +void register_portio_handler( + struct domain *d, unsigned int port, unsigned int size, + portio_action_t action); + +bool relocate_portio_handler( + struct domain *d, unsigned int old_port, unsigned int new_port, + unsigned int size); + +void send_timeoffset_req(unsigned long timeoff); +bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn, + struct npfec); +bool handle_pio(uint16_t port, unsigned int size, int dir); +void hvm_interrupt_post(struct vcpu *v, int vector, int type); +void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq); +void msix_write_completion(struct vcpu *); + +#ifdef CONFIG_HVM +void msixtbl_init(struct domain *d); +#else +static inline void msixtbl_init(struct domain *d) {} +#endif + +/* Arch-specific MSI data for vPCI. */ +struct vpci_arch_msi { + int pirq; + bool bound; +}; + +/* Arch-specific MSI-X entry data for vPCI. */ +struct vpci_arch_msix_entry { + int pirq; +}; + +enum stdvga_cache_state { + STDVGA_CACHE_UNINITIALIZED, + STDVGA_CACHE_ENABLED, + STDVGA_CACHE_DISABLED +}; + +struct hvm_hw_stdvga { + uint8_t sr_index; + uint8_t sr[8]; + uint8_t gr_index; + uint8_t gr[9]; + bool_t stdvga; + enum stdvga_cache_state cache; + uint32_t latch; + struct page_info *vram_page[64]; /* shadow of 0xa0000-0xaffff */ + spinlock_t lock; +}; + +void stdvga_init(struct domain *d); +void stdvga_deinit(struct domain *d); + +extern void hvm_dpci_msi_eoi(struct domain *d, int vector); + +/* Decode a PCI port IO access into a bus/slot/func/reg. */ +unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr, + pci_sbdf_t *sbdf); + +/* + * HVM port IO handler that performs forwarding of guest IO ports into machine + * IO ports. + */ +void register_g2m_portio_handler(struct domain *d); + +/* HVM port IO handler for vPCI accesses. 
*/ +void register_vpci_portio_handler(struct domain *d); + +/* HVM MMIO handler for PCI MMCFG accesses. */ +int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr, + unsigned int start_bus, unsigned int end_bus, + unsigned int seg); +/* Destroy tracked MMCFG areas. */ +void destroy_vpci_mmcfg(struct domain *d); + +/* Check whether an address falls within an MMCFG region of a domain. */ +bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr); + +#endif /* __ASM_X86_HVM_IO_H__ */ + + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/ioreq.h b/xen/arch/x86/include/asm/hvm/ioreq.h new file mode 100644 index 0000000000..9b2eb6fedf --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/ioreq.h @@ -0,0 +1,37 @@ +/* + * ioreq.h: Hardware virtual machine assist interface definitions. + * + * Copyright (c) 2016 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_X86_HVM_IOREQ_H__ +#define __ASM_X86_HVM_IOREQ_H__ + +/* This correlation must not be altered */ +#define IOREQ_STATUS_HANDLED X86EMUL_OKAY +#define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE +#define IOREQ_STATUS_RETRY X86EMUL_RETRY + +#endif /* __ASM_X86_HVM_IOREQ_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/irq.h b/xen/arch/x86/include/asm/hvm/irq.h new file mode 100644 index 0000000000..c4369ceb7a --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/irq.h @@ -0,0 +1,227 @@ +/****************************************************************************** + * irq.h + * + * Interrupt distribution and delivery logic. + * + * Copyright (c) 2006, K A Fraser, XenSource Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_X86_HVM_IRQ_H__ +#define __ASM_X86_HVM_IRQ_H__ + +#include + +#include +#include +#include + +struct hvm_irq { + /* + * Virtual interrupt wires for a single PCI bus. + * Indexed by: device*4 + INTx#. + */ + struct hvm_hw_pci_irqs pci_intx; + + /* + * Virtual interrupt wires for ISA devices. + * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). + */ + struct hvm_hw_isa_irqs isa_irq; + + /* + * PCI-ISA interrupt router. + * Each PCI device is 'wire-ORed' into one of four links using + * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
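+ * (For example, device 2's INTB#, i.e. intx 1, asserts link (2 + 1) & 3 = 3; + * cf. hvm_pci_intx_link() below.)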
+     * The router provides a programmable mapping from each link to a GSI.
+     */
+    struct hvm_hw_pci_link pci_link;
+
+    /* Virtual interrupt and via-link for paravirtual platform driver. */
+    uint32_t callback_via_asserted;
+    union {
+        enum {
+            HVMIRQ_callback_none,
+            HVMIRQ_callback_gsi,
+            HVMIRQ_callback_pci_intx,
+            HVMIRQ_callback_vector
+        } callback_via_type;
+    };
+    union {
+        uint32_t gsi;
+        struct { uint8_t dev, intx; } pci;
+        uint32_t vector;
+    } callback_via;
+
+    /* Number of INTx wires asserting each PCI-ISA link. */
+    u8 pci_link_assert_count[4];
+
+    /*
+     * GSIs map onto PIC/IO-APIC in the usual way:
+     *  0-7:  Master 8259 PIC, IO-APIC pins 0-7
+     *  8-15: Slave 8259 PIC, IO-APIC pins 8-15
+     *  16+ : IO-APIC pins 16+
+     */
+
+    /* Last VCPU that was delivered a LowestPrio interrupt. */
+    u8 round_robin_prev_vcpu;
+
+    struct hvm_irq_dpci *dpci;
+
+    /*
+     * Number of wires asserting each GSI.
+     *
+     * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
+     * except ISA IRQ 0, which is connected to GSI 2.
+     * PCI links map into this space via the PCI-ISA bridge.
+     *
+     * GSIs 16+ are used only by PCI devices. The mapping from PCI device to
+     * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
+     */
+    unsigned int nr_gsis;
+    u8 gsi_assert_count[];
+};
+
+#define hvm_pci_intx_gsi(dev, intx)  \
+    (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
+#define hvm_pci_intx_link(dev, intx) \
+    (((dev) + (intx)) & 3)
+#define hvm_domain_irq(d) ((d)->arch.hvm.irq)
+
+#define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
+
+/* Check/Acknowledge next pending interrupt. */
+struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
+struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
+                                           struct hvm_intack intack);
+
+struct dev_intx_gsi_link {
+    struct list_head list;
+    uint8_t bus;
+    uint8_t device;
+    uint8_t intx;
+};
+
+#define _HVM_IRQ_DPCI_MACH_PCI_SHIFT       0
+#define _HVM_IRQ_DPCI_MACH_MSI_SHIFT       1
+#define _HVM_IRQ_DPCI_MAPPED_SHIFT         2
+#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT      4
+#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT      5
+#define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT   6
+#define _HVM_IRQ_DPCI_NO_EOI_SHIFT         7
+#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT     15
+#define HVM_IRQ_DPCI_MACH_PCI     (1u << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
+#define HVM_IRQ_DPCI_MACH_MSI     (1u << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
+#define HVM_IRQ_DPCI_MAPPED       (1u << _HVM_IRQ_DPCI_MAPPED_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_PCI    (1u << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_MSI    (1u << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
+#define HVM_IRQ_DPCI_IDENTITY_GSI (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT)
+#define HVM_IRQ_DPCI_NO_EOI       (1u << _HVM_IRQ_DPCI_NO_EOI_SHIFT)
+#define HVM_IRQ_DPCI_TRANSLATE    (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
+
+struct hvm_gmsi_info {
+    uint32_t gvec;
+    uint32_t gflags;
+    int dest_vcpu_id; /* -1: multi-dest; non-negative: dest_vcpu_id */
+    bool posted; /* directly deliver to guest via VT-d PI? */
+};
+
+struct hvm_girq_dpci_mapping {
+    struct list_head list;
+    uint8_t bus;
+    uint8_t device;
+    uint8_t intx;
+    uint8_t machine_gsi;
+};
+
+#define NR_ISAIRQS  16
+#define NR_LINK     4
+#define NR_HVM_DOMU_IRQS ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl)
+
+/* Protected by domain's event_lock */
+struct hvm_irq_dpci {
+    /* Guest IRQ to guest device/intx mapping. */
+    struct list_head girq[NR_HVM_DOMU_IRQS];
+    /* Record of mapped ISA IRQs */
+    DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
+    /* Record of mapped Links */
+    uint8_t link_cnt[NR_LINK];
+};
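/*
 * Worked example, not part of the patch, for the mapping macros above:
 * PCI device 3 asserting INTA# (intx 0) gives
 *   hvm_pci_intx_gsi(3, 0)  = ((3*4 + 3/8 + 0) & 31) + 16 = 28
 *   hvm_pci_intx_link(3, 0) = (3 + 0) & 3 = 3
 * i.e. the wire is ORed into PCI-ISA link 3 and, when routed straight to
 * the IO-APIC, raises GSI 28.
 */

+/* Machine IRQ to guest device/intx mapping. */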
+struct hvm_pirq_dpci {
+    uint32_t flags;
+    unsigned int state;
+    bool masked;
+    uint16_t pending;
+    struct list_head digl_list;
+    struct domain *dom;
+    struct hvm_gmsi_info gmsi;
+    struct list_head softirq_list;
+};
+
+void pt_pirq_init(struct domain *, struct hvm_pirq_dpci *);
+bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *);
+int pt_pirq_iterate(struct domain *d,
+                    int (*cb)(struct domain *,
+                              struct hvm_pirq_dpci *, void *arg),
+                    void *arg);
+
+#ifdef CONFIG_HVM
+bool pt_pirq_softirq_active(struct hvm_pirq_dpci *);
+#else
+static inline bool pt_pirq_softirq_active(struct hvm_pirq_dpci *dpci)
+{
+    return false;
+}
+#endif
+
+/* Modify state of a PCI INTx wire. */
+void hvm_pci_intx_assert(struct domain *d, unsigned int device,
+                         unsigned int intx);
+void hvm_pci_intx_deassert(struct domain *d, unsigned int device,
+                           unsigned int intx);
+
+/*
+ * Modify state of an ISA device's IRQ wire. In some cases we are
+ * interested in the interrupt vector of the IRQ, but once the irq_lock
+ * is released the vector may be changed by others; the get_vector()
+ * callback allows us to read the vector while still under the protection
+ * of the irq_lock. In most cases simply pass NULL for get_vector.
+ */
+int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
+                       int (*get_vector)(const struct domain *d,
+                                         unsigned int gsi));
+void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq);
+
+/* Modify state of GSIs. */
+void hvm_gsi_assert(struct domain *d, unsigned int gsi);
+void hvm_gsi_deassert(struct domain *d, unsigned int gsi);
+
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+
+int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
+
+/* Assert/deassert an IO APIC pin. */
+int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool level);
+void hvm_ioapic_deassert(struct domain *d, unsigned int gsi);
+
+void hvm_maybe_deassert_evtchn_irq(void);
+void hvm_assert_evtchn_irq(struct vcpu *v);
+void hvm_set_callback_via(struct domain *d, uint64_t via);
+
+struct pirq;
+bool hvm_domain_use_pirq(const struct domain *, const struct pirq *);
+
+#endif /* __ASM_X86_HVM_IRQ_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/monitor.h b/xen/arch/x86/include/asm/hvm/monitor.h
new file mode 100644
index 0000000000..a75cd8545c
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/monitor.h
@@ -0,0 +1,65 @@
+/*
+ * include/asm-x86/hvm/monitor.h
+ *
+ * Arch-specific hardware virtual machine monitor abstractions.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_X86_HVM_MONITOR_H__
+#define __ASM_X86_HVM_MONITOR_H__
+
+#include <public/vm_event.h>
+
+enum hvm_monitor_debug_type
+{
+    HVM_MONITOR_SOFTWARE_BREAKPOINT,
+    HVM_MONITOR_SINGLESTEP_BREAKPOINT,
+    HVM_MONITOR_DEBUG_EXCEPTION,
+};
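/*
 * Illustration, not part of the patch: the hvm_monitor_crX() wrapper
 * declared below just pastes the register name into the vm_event constant,
 * so hvm_monitor_crX(CR0, val, old) expands to
 * hvm_monitor_cr(VM_EVENT_X86_CR0, val, old).
 */

+/*
+ * Called for current VCPU on crX/MSR changes by guest. Bool return signals
+ * whether emulation should be postponed.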
+ */ +bool hvm_monitor_cr(unsigned int index, unsigned long value, + unsigned long old); +#define hvm_monitor_crX(cr, new, old) \ + hvm_monitor_cr(VM_EVENT_X86_##cr, new, old) +bool hvm_monitor_msr(unsigned int msr, uint64_t value, uint64_t old_value); +void hvm_monitor_descriptor_access(uint64_t exit_info, + uint64_t vmx_exit_qualification, + uint8_t descriptor, bool is_write); +int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type, + unsigned int trap_type, unsigned int insn_length, + unsigned int pending_dbg); +int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf, + unsigned int subleaf); +void hvm_monitor_interrupt(unsigned int vector, unsigned int type, + unsigned int err, uint64_t cr2); +bool hvm_monitor_emul_unimplemented(void); + +bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec, + uint16_t kind); + +#endif /* __ASM_X86_HVM_MONITOR_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/nestedhvm.h b/xen/arch/x86/include/asm/hvm/nestedhvm.h new file mode 100644 index 0000000000..d263925786 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/nestedhvm.h @@ -0,0 +1,100 @@ +/* + * Nested HVM + * Copyright (c) 2011, Advanced Micro Devices, Inc. + * Author: Christoph Egger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+ */
+
+#ifndef _HVM_NESTEDHVM_H
+#define _HVM_NESTEDHVM_H
+
+#include <xen/types.h> /* for uintNN_t */
+#include <xen/sched.h> /* for struct vcpu, struct domain */
+#include /* for vcpu_nestedhvm */
+#include
+
+enum nestedhvm_vmexits {
+    NESTEDHVM_VMEXIT_ERROR = 0, /* inject VMEXIT w/ invalid VMCB */
+    NESTEDHVM_VMEXIT_FATALERROR = 1, /* crash first level guest */
+    NESTEDHVM_VMEXIT_HOST = 2,  /* exit handled on host level */
+    NESTEDHVM_VMEXIT_CONTINUE = 3, /* further handling */
+    NESTEDHVM_VMEXIT_INJECT = 4, /* inject VMEXIT */
+    NESTEDHVM_VMEXIT_DONE = 5,  /* VMEXIT handled */
+};
+
+/* Nested HVM on/off per domain */
+static inline bool nestedhvm_enabled(const struct domain *d)
+{
+    return IS_ENABLED(CONFIG_HVM) && (d->options & XEN_DOMCTL_CDF_nested_virt);
+}
+
+/* Nested VCPU */
+int nestedhvm_vcpu_initialise(struct vcpu *v);
+void nestedhvm_vcpu_destroy(struct vcpu *v);
+void nestedhvm_vcpu_reset(struct vcpu *v);
+bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
+#define nestedhvm_vcpu_enter_guestmode(v) \
+    vcpu_nestedhvm(v).nv_guestmode = 1
+#define nestedhvm_vcpu_exit_guestmode(v) \
+    vcpu_nestedhvm(v).nv_guestmode = 0
+
+/* Nested paging */
+#define NESTEDHVM_PAGEFAULT_DONE        0
+#define NESTEDHVM_PAGEFAULT_INJECT      1
+#define NESTEDHVM_PAGEFAULT_L1_ERROR    2
+#define NESTEDHVM_PAGEFAULT_L0_ERROR    3
+#define NESTEDHVM_PAGEFAULT_MMIO        4
+#define NESTEDHVM_PAGEFAULT_RETRY       5
+#define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6
+int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
+    bool_t access_r, bool_t access_w, bool_t access_x);
+
+int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+                          unsigned int *page_order, uint8_t *p2m_acc,
+                          bool_t access_r, bool_t access_w, bool_t access_x);
+
+/* IO permission map */
+unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
+
+/* Misc */
+#define nestedhvm_paging_mode_hap(v) (!!nhvm_vmcx_hap_enabled(v))
+#define nestedhvm_vmswitch_in_progress(v)   \
+    (!!vcpu_nestedhvm((v)).nv_vmswitch_in_progress)
+
+void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m);
+
+static inline bool nestedhvm_is_n2(struct vcpu *v)
+{
+    if ( !nestedhvm_enabled(v->domain) ||
+         nestedhvm_vmswitch_in_progress(v) ||
+         !nestedhvm_paging_mode_hap(v) )
+        return false;
+
+    return nestedhvm_vcpu_in_guestmode(v);
+}
+
+static inline void nestedhvm_set_cr(struct vcpu *v, unsigned int cr,
+                                    unsigned long value)
+{
+    if ( !nestedhvm_vmswitch_in_progress(v) &&
+         nestedhvm_vcpu_in_guestmode(v) )
+        v->arch.hvm.nvcpu.guest_cr[cr] = value;
+}
+
+static inline bool vvmcx_valid(const struct vcpu *v)
+{
+    return vcpu_nestedhvm(v).nv_vvmcxaddr != INVALID_PADDR;
+}
+
+#endif /* _HVM_NESTEDHVM_H */
diff --git a/xen/arch/x86/include/asm/hvm/save.h b/xen/arch/x86/include/asm/hvm/save.h
new file mode 100644
index 0000000000..4efc535055
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/save.h
@@ -0,0 +1,144 @@
+/*
+ * save.h: HVM support routines for save/restore
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XEN_HVM_SAVE_H__
+#define __XEN_HVM_SAVE_H__
+
+#include
+#include
+#include
+#include
+
+/* Marshalling and unmarshalling uses a buffer with size and cursor. */
+typedef struct hvm_domain_context {
+    uint32_t cur;
+    uint32_t size;
+    uint8_t *data;
+} hvm_domain_context_t;
+
+/* Marshalling an entry: check space and fill in the header */
+int _hvm_init_entry(struct hvm_domain_context *h,
+                    uint16_t tc, uint16_t inst, uint32_t len);
+
+/* Marshalling: copy the contents in a type-safe way */
+void _hvm_write_entry(struct hvm_domain_context *h,
+                      void *src, uint32_t src_len);
+
+/* Marshalling: init and copy; evaluates to zero on success */
+#define hvm_save_entry(_x, _inst, _h, _src) ({          \
+    int r;                                              \
+    r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x),        \
+                        (_inst), HVM_SAVE_LENGTH(_x));  \
+    if ( r == 0 )                                       \
+        _hvm_write_entry((_h), (_src), HVM_SAVE_LENGTH(_x)); \
+    r; })
+
+/* Unmarshalling: test an entry's size and typecode and record the instance */
+int _hvm_check_entry(struct hvm_domain_context *h,
+                     uint16_t type, uint32_t len, bool_t strict_length);
+
+/* Unmarshalling: copy the contents in a type-safe way */
+void _hvm_read_entry(struct hvm_domain_context *h,
+                     void *dest, uint32_t dest_len);
+
+/*
+ * Unmarshalling: check, then copy. Evaluates to zero on success. This load
+ * function requires the save entry to be the same size as the dest structure.
+ */
+#define _hvm_load_entry(_x, _h, _dst, _strict) ({                       \
+    int r;                                                              \
+    struct hvm_save_descriptor *desc                                    \
+        = (struct hvm_save_descriptor *)&(_h)->data[(_h)->cur];         \
+    if ( (r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x),                 \
+                               HVM_SAVE_LENGTH(_x), (_strict))) == 0 )  \
+    {                                                                   \
+        _hvm_read_entry((_h), (_dst), HVM_SAVE_LENGTH(_x));             \
+        if ( HVM_SAVE_HAS_COMPAT(_x) &&                                 \
+             desc->length != HVM_SAVE_LENGTH(_x) )                      \
+            r = HVM_SAVE_FIX_COMPAT(_x, (_dst), desc->length);          \
+    }                                                                   \
+    else if (HVM_SAVE_HAS_COMPAT(_x)                                    \
+             && (r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x),          \
+                       HVM_SAVE_LENGTH_COMPAT(_x), (_strict))) == 0 ) { \
+        _hvm_read_entry((_h), (_dst), HVM_SAVE_LENGTH_COMPAT(_x));      \
+        r = HVM_SAVE_FIX_COMPAT(_x, (_dst), desc->length);              \
+    }                                                                   \
+    r; })
+
+#define hvm_load_entry(_x, _h, _dst)            \
+    _hvm_load_entry(_x, _h, _dst, 1)
+#define hvm_load_entry_zeroextend(_x, _h, _dst) \
+    _hvm_load_entry(_x, _h, _dst, 0)
+
+/* Unmarshalling: what is the instance ID of the next entry? */
+static inline unsigned int hvm_load_instance(const struct hvm_domain_context *h)
+{
+    const struct hvm_save_descriptor *d = (const void *)&h->data[h->cur];
+
+    return d->instance;
+}
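/*
 * Illustrative sketch, not part of the patch: a minimal save/load pair for
 * a hypothetical per-domain "DEMO" record, assuming HVM_SAVE_CODE(DEMO),
 * HVM_SAVE_LENGTH(DEMO) and struct hvm_hw_demo were declared in the public
 * save-format headers. The registration macro used at the end is defined
 * further below.
 */
static int demo_save(struct vcpu *v, hvm_domain_context_t *h)
{
    struct hvm_hw_demo ctxt = { /* snapshot the device state here */ };

    /* Evaluates to zero on success, per hvm_save_entry() above. */
    return hvm_save_entry(DEMO, 0, h, &ctxt);
}

static int demo_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_demo ctxt;

    if ( hvm_load_entry(DEMO, h, &ctxt) != 0 )
        return -1;

    /* Apply ctxt to the device model here. */
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(DEMO, demo_save, demo_load, 1, HVMSR_PER_DOM);

+/* Handler types for different types of save-file entry.
+ * The save handler may save multiple instances of a type into the buffer;
+ * the load handler will be called once for each instance found when
+ * restoring. Both return non-zero on error.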
*/ +typedef int (*hvm_save_handler) (struct vcpu *v, + hvm_domain_context_t *h); +typedef int (*hvm_load_handler) (struct domain *d, + hvm_domain_context_t *h); + +/* Init-time function to declare a pair of handlers for a type, + * and the maximum buffer space needed to save this type of state */ +void hvm_register_savevm(uint16_t typecode, + const char *name, + hvm_save_handler save_state, + hvm_load_handler load_state, + size_t size, int kind); + +/* The space needed for saving can be per-domain or per-vcpu: */ +#define HVMSR_PER_DOM 0 +#define HVMSR_PER_VCPU 1 + +/* Syntactic sugar around that function: specify the max number of + * saves, and this calculates the size of buffer needed */ +#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k) \ +static int __init __hvm_register_##_x##_save_and_restore(void) \ +{ \ + hvm_register_savevm(HVM_SAVE_CODE(_x), \ + #_x, \ + &_save, \ + &_load, \ + (_num) * (HVM_SAVE_LENGTH(_x) \ + + sizeof (struct hvm_save_descriptor)), \ + _k); \ + return 0; \ +} \ +__initcall(__hvm_register_##_x##_save_and_restore); + + +/* Entry points for saving and restoring HVM domain state */ +size_t hvm_save_size(struct domain *d); +int hvm_save(struct domain *d, hvm_domain_context_t *h); +int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, + XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz); +int hvm_load(struct domain *d, hvm_domain_context_t *h); + +/* Arch-specific definitions. */ +struct hvm_save_header; +void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr); +int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr); + +#endif /* __XEN_HVM_SAVE_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/support.h b/xen/arch/x86/include/asm/hvm/support.h new file mode 100644 index 0000000000..6b583738ec --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/support.h @@ -0,0 +1,170 @@ +/* + * support.h: HVM support routines used by VT-x and SVM. + * + * Leendert van Doorn, leendert@watson.ibm.com + * Copyright (c) 2005, International Business Machines Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_HVM_SUPPORT_H__ +#define __ASM_X86_HVM_SUPPORT_H__ + +#include +#include +#include +#include +#include + +#ifndef NDEBUG +#define DBG_LEVEL_0 (1 << 0) +#define DBG_LEVEL_1 (1 << 1) +#define DBG_LEVEL_2 (1 << 2) +#define DBG_LEVEL_3 (1 << 3) +#define DBG_LEVEL_IO (1 << 4) +#define DBG_LEVEL_VMMU (1 << 5) +#define DBG_LEVEL_VLAPIC (1 << 6) +#define DBG_LEVEL_VLAPIC_TIMER (1 << 7) +#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8) +#define DBG_LEVEL_IOAPIC (1 << 9) +#define DBG_LEVEL_HCALL (1 << 10) +#define DBG_LEVEL_MSR (1 << 11) + +extern unsigned int opt_hvm_debug_level; +#define HVM_DBG_LOG(level, _f, _a...) \ + do { \ + if ( unlikely((level) & opt_hvm_debug_level) ) \ + printk("[HVM:%d.%d] <%s> " _f "\n", \ + current->domain->domain_id, current->vcpu_id, __func__, \ + ## _a); \ + } while (0) +#else +#define HVM_DBG_LOG(level, _f, _a...) 
do {} while (0)
+#endif
+
+extern unsigned long hvm_io_bitmap[];
+
+enum hvm_translation_result {
+    HVMTRANS_okay,
+    HVMTRANS_bad_linear_to_gfn,
+    HVMTRANS_bad_gfn_to_mfn,
+    HVMTRANS_unhandleable,
+    HVMTRANS_gfn_paged_out,
+    HVMTRANS_gfn_shared,
+    HVMTRANS_need_retry,
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_translation_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, unsigned int size, struct vcpu *v);
+enum hvm_translation_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, unsigned int size);
+
+/*
+ * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
+ * if emulating a user-mode access (CPL=3). All other flags in @pfec are
+ * managed by the called function: it is therefore optional for the caller
+ * to set them.
+ *
+ * Returns:
+ *  HVMTRANS_okay: Copy was entirely successful.
+ *  HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
+ *                           ordinary machine memory.
+ *  HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
+ *                              valid mapping to a guest physical address.
+ *                              The pagefault_info_t structure will be filled
+ *                              in if provided.
+ */
+typedef struct pagefault_info
+{
+    unsigned long linear;
+    int ec;
+} pagefault_info_t;
+
+enum hvm_translation_result hvm_copy_to_guest_linear(
+    unsigned long addr, void *buf, unsigned int size, uint32_t pfec,
+    pagefault_info_t *pfinfo);
+enum hvm_translation_result hvm_copy_from_guest_linear(
+    void *buf, unsigned long addr, unsigned int size, uint32_t pfec,
+    pagefault_info_t *pfinfo);
+enum hvm_translation_result hvm_copy_from_vcpu_linear(
+    void *buf, unsigned long addr, unsigned int size, struct vcpu *v,
+    unsigned int pfec);
+
+/*
+ * Get a reference on the page under an HVM physical or linear address. If
+ * linear, a pagewalk is performed using pfec (fault details optionally in
+ * pfinfo).
+ * On success, returns HVMTRANS_okay with a reference taken on **_page.
+ */
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **page_p,
+    gfn_t *gfn_p, p2m_type_t *p2mt_p);
+
+#define HVM_HCALL_completed  0 /* hypercall completed - no further action */
+#define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
+int hvm_hypercall(struct cpu_user_regs *regs);
+
+void hvm_hlt(unsigned int eflags);
+void hvm_triple_fault(void);
+
+#define VM86_TSS_UPDATED (1ULL << 63)
+void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit);
+
+void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
+
+int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
+
+void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
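/*
 * Illustrative sketch, not part of the patch: consuming the result of a
 * linear-address copy. demo_read_guest() is invented;
 * hvm_inject_page_fault() is the existing injection helper declared in
 * asm/hvm/hvm.h.
 */
static int demo_read_guest(unsigned long gla, void *buf, unsigned int len)
{
    pagefault_info_t pfinfo;

    /* pfec == 0: an implicit supervisor-mode read. */
    switch ( hvm_copy_from_guest_linear(buf, gla, len, 0, &pfinfo) )
    {
    case HVMTRANS_okay:
        return 0;

    case HVMTRANS_bad_linear_to_gfn:
        /* Reflect the failed pagewalk back into the guest as a #PF. */
        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
        return -EFAULT;

    default:
        /* Didn't map to ordinary RAM, or needs a retry; give up here. */
        return -EFAULT;
    }
}

+/*
+ * These functions all return X86EMUL return codes. For hvm_set_*(), the
+ * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
+ * returned.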
+ */ +int hvm_set_efer(uint64_t value); +int hvm_set_cr0(unsigned long value, bool may_defer); +int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer); +int hvm_set_cr4(unsigned long value, bool may_defer); +int hvm_descriptor_access_intercept(uint64_t exit_info, + uint64_t vmx_exit_qualification, + unsigned int descriptor, bool is_write); +int hvm_mov_to_cr(unsigned int cr, unsigned int gpr); +int hvm_mov_from_cr(unsigned int cr, unsigned int gpr); +void hvm_ud_intercept(struct cpu_user_regs *); + +/* + * May return X86EMUL_EXCEPTION, at which point the caller is responsible for + * injecting a #GP fault. Used to support speculative reads. + */ +int __must_check hvm_msr_read_intercept( + unsigned int msr, uint64_t *msr_content); +int __must_check hvm_msr_write_intercept( + unsigned int msr, uint64_t msr_content, bool may_defer); + +#endif /* __ASM_X86_HVM_SUPPORT_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/svm/asid.h b/xen/arch/x86/include/asm/hvm/svm/asid.h new file mode 100644 index 0000000000..0e5ec3ab78 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/svm/asid.h @@ -0,0 +1,49 @@ +/* + * asid.h: handling ASIDs in SVM. + * Copyright (c) 2007, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_HVM_SVM_ASID_H__ +#define __ASM_X86_HVM_SVM_ASID_H__ + +#include +#include +#include + +void svm_asid_init(const struct cpuinfo_x86 *c); +void svm_asid_handle_vmrun(void); + +static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear) +{ +#if 0 + /* Optimization? */ + svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid); +#endif + + /* Safe fallback. Take a new ASID. */ + hvm_asid_flush_vcpu(v); +} + +#endif /* __ASM_X86_HVM_SVM_ASID_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/svm/emulate.h b/xen/arch/x86/include/asm/hvm/svm/emulate.h new file mode 100644 index 0000000000..eb1a8c24af --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/svm/emulate.h @@ -0,0 +1,66 @@ +/* + * emulate.h: SVM instruction emulation bits. + * Copyright (c) 2005, AMD Corporation. + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+ */ + +#ifndef __ASM_X86_HVM_SVM_EMULATE_H__ +#define __ASM_X86_HVM_SVM_EMULATE_H__ + +/* + * Encoding for svm_get_insn_len(). We take X86EMUL_OPC() for the main + * opcode, shifted left to make room for the ModRM byte. + * + * The Grp7 instructions have their ModRM byte expressed in octal for easier + * cross referencing with the opcode extension table. + */ +#define INSTR_ENC(opc, modrm) (((opc) << 8) | (modrm)) + +#define INSTR_PAUSE INSTR_ENC(X86EMUL_OPC_F3(0, 0x90), 0) +#define INSTR_INT3 INSTR_ENC(X86EMUL_OPC( 0, 0xcc), 0) +#define INSTR_ICEBP INSTR_ENC(X86EMUL_OPC( 0, 0xf1), 0) +#define INSTR_HLT INSTR_ENC(X86EMUL_OPC( 0, 0xf4), 0) +#define INSTR_XSETBV INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0321) +#define INSTR_VMRUN INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0330) +#define INSTR_VMCALL INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0331) +#define INSTR_VMLOAD INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0332) +#define INSTR_VMSAVE INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0333) +#define INSTR_STGI INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0334) +#define INSTR_CLGI INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0335) +#define INSTR_INVLPGA INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0337) +#define INSTR_RDTSCP INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0371) +#define INSTR_INVD INSTR_ENC(X86EMUL_OPC(0x0f, 0x08), 0) +#define INSTR_WBINVD INSTR_ENC(X86EMUL_OPC(0x0f, 0x09), 0) +#define INSTR_WRMSR INSTR_ENC(X86EMUL_OPC(0x0f, 0x30), 0) +#define INSTR_RDTSC INSTR_ENC(X86EMUL_OPC(0x0f, 0x31), 0) +#define INSTR_RDMSR INSTR_ENC(X86EMUL_OPC(0x0f, 0x32), 0) +#define INSTR_CPUID INSTR_ENC(X86EMUL_OPC(0x0f, 0xa2), 0) + +struct vcpu; + +unsigned int svm_get_insn_len(struct vcpu *v, unsigned int instr_enc); +unsigned int svm_get_task_switch_insn_len(void); + +#endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/svm/intr.h b/xen/arch/x86/include/asm/hvm/svm/intr.h new file mode 100644 index 0000000000..ae52d9f948 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/svm/intr.h @@ -0,0 +1,25 @@ +/* + * intr.h: SVM Architecture related definitions + * Copyright (c) 2005, AMD Corporation. + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + * + */ + +#ifndef __ASM_X86_HVM_SVM_INTR_H__ +#define __ASM_X86_HVM_SVM_INTR_H__ + +void svm_intr_assist(void); + +#endif /* __ASM_X86_HVM_SVM_INTR_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h new file mode 100644 index 0000000000..0873698457 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h @@ -0,0 +1,145 @@ +/* + * nestedsvm.h: Nested Virtualization + * Copyright (c) 2011, Advanced Micro Devices, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef __ASM_X86_HVM_SVM_NESTEDSVM_H__
+#define __ASM_X86_HVM_SVM_NESTEDSVM_H__
+
+#include
+#include
+
+/* SVM specific intblk types, cannot be an enum because gcc 4.5 complains */
+/* GIF cleared */
+#define hvm_intblk_svm_gif      hvm_intblk_arch
+
+struct nestedsvm {
+    bool_t ns_gif;
+    uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */
+
+    /* l1 guest physical address of virtual vmcb used by prior VMRUN.
+     * Needed for VMCB Cleanbit emulation.
+     */
+    uint64_t ns_ovvmcb_pa;
+
+    /* virtual tscratio holding the value l1 guest writes to the
+     * MSR_AMD64_TSC_RATIO MSR.
+     */
+    uint64_t ns_tscratio;
+
+    /* Cached real intercepts of the l2 guest */
+    uint32_t ns_cr_intercepts;
+    uint32_t ns_dr_intercepts;
+    uint32_t ns_exception_intercepts;
+    uint32_t ns_general1_intercepts;
+    uint32_t ns_general2_intercepts;
+
+    /* Cached real LBR and other virtual extensions of the l2 guest */
+    virt_ext_t ns_virt_ext;
+
+    /* Cached real MSR permission bitmaps of the l2 guest */
+    unsigned long *ns_cached_msrpm;
+    /* Merged MSR permission bitmap */
+    unsigned long *ns_merged_msrpm;
+
+    /* guest physical address of virtual io permission map */
+    paddr_t ns_iomap_pa, ns_oiomap_pa;
+    /* Shadow io permission map */
+    unsigned long *ns_iomap;
+
+    uint64_t ns_cr0; /* Cached guest_cr[0] of l1 guest while l2 guest runs.
+                      * Needed to handle FPU context switching */
+
+    /* Cache guest cr3/host cr3 the guest sets up for the l2 guest.
+     * Used by Shadow-on-Shadow and Nested-on-Nested.
+ * ns_vmcb_guestcr3: in l2 guest physical address space and points to + * the l2 guest page table + * ns_vmcb_hostcr3: in l1 guest physical address space and points to + * the l1 guest nested page table + */ + uint64_t ns_vmcb_guestcr3, ns_vmcb_hostcr3; + uint32_t ns_guest_asid; + + bool_t ns_hap_enabled; + + /* Only meaningful when vmexit_pending flag is set */ + struct { + uint64_t exitcode; /* native exitcode to inject into l1 guest */ + uint64_t exitinfo1; /* additional information to the exitcode */ + uint64_t exitinfo2; /* additional information to the exitcode */ + } ns_vmexit; + union { + uint32_t bytes; + struct { + uint32_t rflagsif: 1; + uint32_t vintrmask: 1; + uint32_t reserved: 30; + } fields; + } ns_hostflags; +}; + +#define vcpu_nestedsvm(v) (vcpu_nestedhvm(v).u.nsvm) + +/* True when l1 guest enabled SVM in EFER */ +#define nsvm_efer_svm_enabled(v) \ + (!!((v)->arch.hvm.guest_efer & EFER_SVME)) + +int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr); +void nestedsvm_vmexit_defer(struct vcpu *v, + uint64_t exitcode, uint64_t exitinfo1, uint64_t exitinfo2); +enum nestedhvm_vmexits +nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs); +enum nestedhvm_vmexits +nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, + uint64_t exitcode); +void svm_nested_features_on_efer_update(struct vcpu *v); + +/* Interface methods */ +void nsvm_vcpu_destroy(struct vcpu *v); +int nsvm_vcpu_initialise(struct vcpu *v); +int nsvm_vcpu_reset(struct vcpu *v); +int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs); +int nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *event); +uint64_t nsvm_vcpu_hostcr3(struct vcpu *v); +bool_t nsvm_vmcb_guest_intercepts_event( + struct vcpu *v, unsigned int vector, int errcode); +bool_t nsvm_vmcb_hap_enabled(struct vcpu *v); +enum hvm_intblk nsvm_intr_blocked(struct vcpu *v); + +/* Interrupts, vGIF */ +void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v); +void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v); +bool_t nestedsvm_gif_isset(struct vcpu *v); +int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, + unsigned int *page_order, uint8_t *p2m_acc, + bool_t access_r, bool_t access_w, bool_t access_x); + +#define NSVM_INTR_NOTHANDLED 3 +#define NSVM_INTR_NOTINTERCEPTED 2 +#define NSVM_INTR_FORCEVMEXIT 1 +#define NSVM_INTR_MASKED 0 +int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack); + +#endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/svm/svm.h b/xen/arch/x86/include/asm/hvm/svm/svm.h new file mode 100644 index 0000000000..05e9685026 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/svm/svm.h @@ -0,0 +1,110 @@ +/* + * svm.h: SVM Architecture related definitions + * Copyright (c) 2005, AMD Corporation. + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + * + */ + +#ifndef __ASM_X86_HVM_SVM_H__ +#define __ASM_X86_HVM_SVM_H__ + +#include + +static inline void svm_vmload_pa(paddr_t vmcb) +{ + asm volatile ( + ".byte 0x0f,0x01,0xda" /* vmload */ + : : "a" (vmcb) : "memory" ); +} + +static inline void svm_vmsave_pa(paddr_t vmcb) +{ + asm volatile ( + ".byte 0x0f,0x01,0xdb" /* vmsave */ + : : "a" (vmcb) : "memory" ); +} + +static inline void svm_invlpga(unsigned long linear, uint32_t asid) +{ + asm volatile ( + ".byte 0x0f,0x01,0xdf" + : /* output */ + : /* input */ + "a" (linear), "c" (asid)); +} + +unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr); +void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len); +void svm_update_guest_cr(struct vcpu *, unsigned int cr, unsigned int flags); + +/* + * PV context switch helpers. Prefetching the VMCB area itself has been shown + * to be useful for performance. + * + * Must only be used for NUL FS/GS, as the segment attributes/limits are not + * read from the GDT/LDT. + */ +void svm_load_segs_prefetch(void); +bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base, + unsigned long fs_base, unsigned long gs_base, + unsigned long gs_shadow); + +extern u32 svm_feature_flags; + +#define SVM_FEATURE_NPT 0 /* Nested page table support */ +#define SVM_FEATURE_LBRV 1 /* LBR virtualization support */ +#define SVM_FEATURE_SVML 2 /* SVM locking MSR support */ +#define SVM_FEATURE_NRIPS 3 /* Next RIP save on VMEXIT support */ +#define SVM_FEATURE_TSCRATEMSR 4 /* TSC ratio MSR support */ +#define SVM_FEATURE_VMCBCLEAN 5 /* VMCB clean bits support */ +#define SVM_FEATURE_FLUSHBYASID 6 /* TLB flush by ASID support */ +#define SVM_FEATURE_DECODEASSISTS 7 /* Decode assists support */ +#define SVM_FEATURE_PAUSEFILTER 10 /* Pause intercept filter support */ +#define SVM_FEATURE_PAUSETHRESH 12 /* Pause intercept filter support */ +#define SVM_FEATURE_VLOADSAVE 15 /* virtual vmload/vmsave */ +#define SVM_FEATURE_VGIF 16 /* Virtual GIF */ +#define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */ +#define SVM_FEATURE_SPEC_CTRL 20 /* MSR_SPEC_CTRL virtualisation */ + +#define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f))) +#define cpu_has_svm_npt cpu_has_svm_feature(SVM_FEATURE_NPT) +#define cpu_has_svm_lbrv cpu_has_svm_feature(SVM_FEATURE_LBRV) +#define cpu_has_svm_svml cpu_has_svm_feature(SVM_FEATURE_SVML) +#define cpu_has_svm_nrips cpu_has_svm_feature(SVM_FEATURE_NRIPS) +#define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN) +#define cpu_has_svm_flushbyasid cpu_has_svm_feature(SVM_FEATURE_FLUSHBYASID) +#define cpu_has_svm_decode cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS) +#define cpu_has_svm_vgif cpu_has_svm_feature(SVM_FEATURE_VGIF) +#define cpu_has_pause_filter cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER) +#define cpu_has_pause_thresh cpu_has_svm_feature(SVM_FEATURE_PAUSETHRESH) +#define cpu_has_tsc_ratio cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR) +#define cpu_has_svm_vloadsave cpu_has_svm_feature(SVM_FEATURE_VLOADSAVE) +#define cpu_has_svm_sss cpu_has_svm_feature(SVM_FEATURE_SSS) +#define cpu_has_svm_spec_ctrl cpu_has_svm_feature(SVM_FEATURE_SPEC_CTRL) + +#define SVM_PAUSEFILTER_INIT 4000 +#define SVM_PAUSETHRESH_INIT 1000 + +/* TSC rate */ +#define DEFAULT_TSC_RATIO 0x0000000100000000ULL +#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL + +/* EXITINFO1 fields on NPT faults */ +#define _NPT_PFEC_with_gla 32 +#define 
NPT_PFEC_with_gla (1UL<<_NPT_PFEC_with_gla)
+#define _NPT_PFEC_in_gpt     33
+#define NPT_PFEC_in_gpt     (1UL<<_NPT_PFEC_in_gpt)
+
+#endif /* __ASM_X86_HVM_SVM_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/svm/svmdebug.h b/xen/arch/x86/include/asm/hvm/svm/svmdebug.h
new file mode 100644
index 0000000000..330c1d91aa
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/svm/svmdebug.h
@@ -0,0 +1,30 @@
+/*
+ * svmdebug.h: SVM related debug definitions
+ * Copyright (c) 2011, AMD Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ASM_X86_HVM_SVM_SVMDEBUG_H__
+#define __ASM_X86_HVM_SVM_SVMDEBUG_H__
+
+#include
+#include
+
+void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state);
+void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb);
+bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
+                      const struct vcpu *v, bool verbose);
+
+#endif /* __ASM_X86_HVM_SVM_SVMDEBUG_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
new file mode 100644
index 0000000000..ed7cebea71
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -0,0 +1,664 @@
+/*
+ * vmcb.h: VMCB related definitions
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ * + */ +#ifndef __ASM_X86_HVM_SVM_VMCB_H__ +#define __ASM_X86_HVM_SVM_VMCB_H__ + +#include + +/* general 1 intercepts */ +enum GenericIntercept1bits +{ + GENERAL1_INTERCEPT_INTR = 1 << 0, + GENERAL1_INTERCEPT_NMI = 1 << 1, + GENERAL1_INTERCEPT_SMI = 1 << 2, + GENERAL1_INTERCEPT_INIT = 1 << 3, + GENERAL1_INTERCEPT_VINTR = 1 << 4, + GENERAL1_INTERCEPT_CR0_SEL_WRITE = 1 << 5, + GENERAL1_INTERCEPT_IDTR_READ = 1 << 6, + GENERAL1_INTERCEPT_GDTR_READ = 1 << 7, + GENERAL1_INTERCEPT_LDTR_READ = 1 << 8, + GENERAL1_INTERCEPT_TR_READ = 1 << 9, + GENERAL1_INTERCEPT_IDTR_WRITE = 1 << 10, + GENERAL1_INTERCEPT_GDTR_WRITE = 1 << 11, + GENERAL1_INTERCEPT_LDTR_WRITE = 1 << 12, + GENERAL1_INTERCEPT_TR_WRITE = 1 << 13, + GENERAL1_INTERCEPT_RDTSC = 1 << 14, + GENERAL1_INTERCEPT_RDPMC = 1 << 15, + GENERAL1_INTERCEPT_PUSHF = 1 << 16, + GENERAL1_INTERCEPT_POPF = 1 << 17, + GENERAL1_INTERCEPT_CPUID = 1 << 18, + GENERAL1_INTERCEPT_RSM = 1 << 19, + GENERAL1_INTERCEPT_IRET = 1 << 20, + GENERAL1_INTERCEPT_SWINT = 1 << 21, + GENERAL1_INTERCEPT_INVD = 1 << 22, + GENERAL1_INTERCEPT_PAUSE = 1 << 23, + GENERAL1_INTERCEPT_HLT = 1 << 24, + GENERAL1_INTERCEPT_INVLPG = 1 << 25, + GENERAL1_INTERCEPT_INVLPGA = 1 << 26, + GENERAL1_INTERCEPT_IOIO_PROT = 1 << 27, + GENERAL1_INTERCEPT_MSR_PROT = 1 << 28, + GENERAL1_INTERCEPT_TASK_SWITCH = 1 << 29, + GENERAL1_INTERCEPT_FERR_FREEZE = 1 << 30, + GENERAL1_INTERCEPT_SHUTDOWN_EVT = 1u << 31 +}; + +/* general 2 intercepts */ +enum GenericIntercept2bits +{ + GENERAL2_INTERCEPT_VMRUN = 1 << 0, + GENERAL2_INTERCEPT_VMMCALL = 1 << 1, + GENERAL2_INTERCEPT_VMLOAD = 1 << 2, + GENERAL2_INTERCEPT_VMSAVE = 1 << 3, + GENERAL2_INTERCEPT_STGI = 1 << 4, + GENERAL2_INTERCEPT_CLGI = 1 << 5, + GENERAL2_INTERCEPT_SKINIT = 1 << 6, + GENERAL2_INTERCEPT_RDTSCP = 1 << 7, + GENERAL2_INTERCEPT_ICEBP = 1 << 8, + GENERAL2_INTERCEPT_WBINVD = 1 << 9, + GENERAL2_INTERCEPT_MONITOR = 1 << 10, + GENERAL2_INTERCEPT_MWAIT = 1 << 11, + GENERAL2_INTERCEPT_MWAIT_CONDITIONAL = 1 << 12, + GENERAL2_INTERCEPT_XSETBV = 1 << 13, + GENERAL2_INTERCEPT_RDPRU = 1 << 14, +}; + + +/* control register intercepts */ +enum CRInterceptBits +{ + CR_INTERCEPT_CR0_READ = 1 << 0, + CR_INTERCEPT_CR1_READ = 1 << 1, + CR_INTERCEPT_CR2_READ = 1 << 2, + CR_INTERCEPT_CR3_READ = 1 << 3, + CR_INTERCEPT_CR4_READ = 1 << 4, + CR_INTERCEPT_CR5_READ = 1 << 5, + CR_INTERCEPT_CR6_READ = 1 << 6, + CR_INTERCEPT_CR7_READ = 1 << 7, + CR_INTERCEPT_CR8_READ = 1 << 8, + CR_INTERCEPT_CR9_READ = 1 << 9, + CR_INTERCEPT_CR10_READ = 1 << 10, + CR_INTERCEPT_CR11_READ = 1 << 11, + CR_INTERCEPT_CR12_READ = 1 << 12, + CR_INTERCEPT_CR13_READ = 1 << 13, + CR_INTERCEPT_CR14_READ = 1 << 14, + CR_INTERCEPT_CR15_READ = 1 << 15, + CR_INTERCEPT_CR0_WRITE = 1 << 16, + CR_INTERCEPT_CR1_WRITE = 1 << 17, + CR_INTERCEPT_CR2_WRITE = 1 << 18, + CR_INTERCEPT_CR3_WRITE = 1 << 19, + CR_INTERCEPT_CR4_WRITE = 1 << 20, + CR_INTERCEPT_CR5_WRITE = 1 << 21, + CR_INTERCEPT_CR6_WRITE = 1 << 22, + CR_INTERCEPT_CR7_WRITE = 1 << 23, + CR_INTERCEPT_CR8_WRITE = 1 << 24, + CR_INTERCEPT_CR9_WRITE = 1 << 25, + CR_INTERCEPT_CR10_WRITE = 1 << 26, + CR_INTERCEPT_CR11_WRITE = 1 << 27, + CR_INTERCEPT_CR12_WRITE = 1 << 28, + CR_INTERCEPT_CR13_WRITE = 1 << 29, + CR_INTERCEPT_CR14_WRITE = 1 << 30, + CR_INTERCEPT_CR15_WRITE = 1u << 31, +}; + + +/* debug register intercepts */ +enum DRInterceptBits +{ + DR_INTERCEPT_DR0_READ = 1 << 0, + DR_INTERCEPT_DR1_READ = 1 << 1, + DR_INTERCEPT_DR2_READ = 1 << 2, + DR_INTERCEPT_DR3_READ = 1 << 3, + DR_INTERCEPT_DR4_READ = 1 << 4, + DR_INTERCEPT_DR5_READ = 1 << 5, + 
DR_INTERCEPT_DR6_READ = 1 << 6, + DR_INTERCEPT_DR7_READ = 1 << 7, + DR_INTERCEPT_DR8_READ = 1 << 8, + DR_INTERCEPT_DR9_READ = 1 << 9, + DR_INTERCEPT_DR10_READ = 1 << 10, + DR_INTERCEPT_DR11_READ = 1 << 11, + DR_INTERCEPT_DR12_READ = 1 << 12, + DR_INTERCEPT_DR13_READ = 1 << 13, + DR_INTERCEPT_DR14_READ = 1 << 14, + DR_INTERCEPT_DR15_READ = 1 << 15, + DR_INTERCEPT_DR0_WRITE = 1 << 16, + DR_INTERCEPT_DR1_WRITE = 1 << 17, + DR_INTERCEPT_DR2_WRITE = 1 << 18, + DR_INTERCEPT_DR3_WRITE = 1 << 19, + DR_INTERCEPT_DR4_WRITE = 1 << 20, + DR_INTERCEPT_DR5_WRITE = 1 << 21, + DR_INTERCEPT_DR6_WRITE = 1 << 22, + DR_INTERCEPT_DR7_WRITE = 1 << 23, + DR_INTERCEPT_DR8_WRITE = 1 << 24, + DR_INTERCEPT_DR9_WRITE = 1 << 25, + DR_INTERCEPT_DR10_WRITE = 1 << 26, + DR_INTERCEPT_DR11_WRITE = 1 << 27, + DR_INTERCEPT_DR12_WRITE = 1 << 28, + DR_INTERCEPT_DR13_WRITE = 1 << 29, + DR_INTERCEPT_DR14_WRITE = 1 << 30, + DR_INTERCEPT_DR15_WRITE = 1u << 31, +}; + +enum VMEXIT_EXITCODE +{ + /* control register read exitcodes */ + VMEXIT_CR0_READ = 0, /* 0x0 */ + VMEXIT_CR1_READ = 1, /* 0x1 */ + VMEXIT_CR2_READ = 2, /* 0x2 */ + VMEXIT_CR3_READ = 3, /* 0x3 */ + VMEXIT_CR4_READ = 4, /* 0x4 */ + VMEXIT_CR5_READ = 5, /* 0x5 */ + VMEXIT_CR6_READ = 6, /* 0x6 */ + VMEXIT_CR7_READ = 7, /* 0x7 */ + VMEXIT_CR8_READ = 8, /* 0x8 */ + VMEXIT_CR9_READ = 9, /* 0x9 */ + VMEXIT_CR10_READ = 10, /* 0xa */ + VMEXIT_CR11_READ = 11, /* 0xb */ + VMEXIT_CR12_READ = 12, /* 0xc */ + VMEXIT_CR13_READ = 13, /* 0xd */ + VMEXIT_CR14_READ = 14, /* 0xe */ + VMEXIT_CR15_READ = 15, /* 0xf */ + + /* control register write exitcodes */ + VMEXIT_CR0_WRITE = 16, /* 0x10 */ + VMEXIT_CR1_WRITE = 17, /* 0x11 */ + VMEXIT_CR2_WRITE = 18, /* 0x12 */ + VMEXIT_CR3_WRITE = 19, /* 0x13 */ + VMEXIT_CR4_WRITE = 20, /* 0x14 */ + VMEXIT_CR5_WRITE = 21, /* 0x15 */ + VMEXIT_CR6_WRITE = 22, /* 0x16 */ + VMEXIT_CR7_WRITE = 23, /* 0x17 */ + VMEXIT_CR8_WRITE = 24, /* 0x18 */ + VMEXIT_CR9_WRITE = 25, /* 0x19 */ + VMEXIT_CR10_WRITE = 26, /* 0x1a */ + VMEXIT_CR11_WRITE = 27, /* 0x1b */ + VMEXIT_CR12_WRITE = 28, /* 0x1c */ + VMEXIT_CR13_WRITE = 29, /* 0x1d */ + VMEXIT_CR14_WRITE = 30, /* 0x1e */ + VMEXIT_CR15_WRITE = 31, /* 0x1f */ + + /* debug register read exitcodes */ + VMEXIT_DR0_READ = 32, /* 0x20 */ + VMEXIT_DR1_READ = 33, /* 0x21 */ + VMEXIT_DR2_READ = 34, /* 0x22 */ + VMEXIT_DR3_READ = 35, /* 0x23 */ + VMEXIT_DR4_READ = 36, /* 0x24 */ + VMEXIT_DR5_READ = 37, /* 0x25 */ + VMEXIT_DR6_READ = 38, /* 0x26 */ + VMEXIT_DR7_READ = 39, /* 0x27 */ + VMEXIT_DR8_READ = 40, /* 0x28 */ + VMEXIT_DR9_READ = 41, /* 0x29 */ + VMEXIT_DR10_READ = 42, /* 0x2a */ + VMEXIT_DR11_READ = 43, /* 0x2b */ + VMEXIT_DR12_READ = 44, /* 0x2c */ + VMEXIT_DR13_READ = 45, /* 0x2d */ + VMEXIT_DR14_READ = 46, /* 0x2e */ + VMEXIT_DR15_READ = 47, /* 0x2f */ + + /* debug register write exitcodes */ + VMEXIT_DR0_WRITE = 48, /* 0x30 */ + VMEXIT_DR1_WRITE = 49, /* 0x31 */ + VMEXIT_DR2_WRITE = 50, /* 0x32 */ + VMEXIT_DR3_WRITE = 51, /* 0x33 */ + VMEXIT_DR4_WRITE = 52, /* 0x34 */ + VMEXIT_DR5_WRITE = 53, /* 0x35 */ + VMEXIT_DR6_WRITE = 54, /* 0x36 */ + VMEXIT_DR7_WRITE = 55, /* 0x37 */ + VMEXIT_DR8_WRITE = 56, /* 0x38 */ + VMEXIT_DR9_WRITE = 57, /* 0x39 */ + VMEXIT_DR10_WRITE = 58, /* 0x3a */ + VMEXIT_DR11_WRITE = 59, /* 0x3b */ + VMEXIT_DR12_WRITE = 60, /* 0x3c */ + VMEXIT_DR13_WRITE = 61, /* 0x3d */ + VMEXIT_DR14_WRITE = 62, /* 0x3e */ + VMEXIT_DR15_WRITE = 63, /* 0x3f */ + + /* processor exception exitcodes (VMEXIT_EXCP[0-31]) */ + VMEXIT_EXCEPTION_DE = 64, /* 0x40, divide-by-zero-error */ + VMEXIT_EXCEPTION_DB = 65, /* 
0x41, debug */ + VMEXIT_EXCEPTION_NMI = 66, /* 0x42, non-maskable-interrupt */ + VMEXIT_EXCEPTION_BP = 67, /* 0x43, breakpoint */ + VMEXIT_EXCEPTION_OF = 68, /* 0x44, overflow */ + VMEXIT_EXCEPTION_BR = 69, /* 0x45, bound-range */ + VMEXIT_EXCEPTION_UD = 70, /* 0x46, invalid-opcode*/ + VMEXIT_EXCEPTION_NM = 71, /* 0x47, device-not-available */ + VMEXIT_EXCEPTION_DF = 72, /* 0x48, double-fault */ + VMEXIT_EXCEPTION_09 = 73, /* 0x49, unsupported (reserved) */ + VMEXIT_EXCEPTION_TS = 74, /* 0x4a, invalid-tss */ + VMEXIT_EXCEPTION_NP = 75, /* 0x4b, segment-not-present */ + VMEXIT_EXCEPTION_SS = 76, /* 0x4c, stack */ + VMEXIT_EXCEPTION_GP = 77, /* 0x4d, general-protection */ + VMEXIT_EXCEPTION_PF = 78, /* 0x4e, page-fault */ + VMEXIT_EXCEPTION_15 = 79, /* 0x4f, reserved */ + VMEXIT_EXCEPTION_MF = 80, /* 0x50, x87 floating-point exception-pending */ + VMEXIT_EXCEPTION_AC = 81, /* 0x51, alignment-check */ + VMEXIT_EXCEPTION_MC = 82, /* 0x52, machine-check */ + VMEXIT_EXCEPTION_XF = 83, /* 0x53, simd floating-point */ +/* VMEXIT_EXCEPTION_20 = 84, 0x54, #VE (Intel specific) */ + VMEXIT_EXCEPTION_CP = 85, /* 0x55, controlflow protection */ + + /* exceptions 20-31 (exitcodes 84-95) are reserved */ + + /* ...and the rest of the #VMEXITs */ + VMEXIT_INTR = 96, /* 0x60 */ + VMEXIT_NMI = 97, /* 0x61 */ + VMEXIT_SMI = 98, /* 0x62 */ + VMEXIT_INIT = 99, /* 0x63 */ + VMEXIT_VINTR = 100, /* 0x64 */ + VMEXIT_CR0_SEL_WRITE = 101, /* 0x65 */ + VMEXIT_IDTR_READ = 102, /* 0x66 */ + VMEXIT_GDTR_READ = 103, /* 0x67 */ + VMEXIT_LDTR_READ = 104, /* 0x68 */ + VMEXIT_TR_READ = 105, /* 0x69 */ + VMEXIT_IDTR_WRITE = 106, /* 0x6a */ + VMEXIT_GDTR_WRITE = 107, /* 0x6b */ + VMEXIT_LDTR_WRITE = 108, /* 0x6c */ + VMEXIT_TR_WRITE = 109, /* 0x6d */ + VMEXIT_RDTSC = 110, /* 0x6e */ + VMEXIT_RDPMC = 111, /* 0x6f */ + VMEXIT_PUSHF = 112, /* 0x70 */ + VMEXIT_POPF = 113, /* 0x71 */ + VMEXIT_CPUID = 114, /* 0x72 */ + VMEXIT_RSM = 115, /* 0x73 */ + VMEXIT_IRET = 116, /* 0x74 */ + VMEXIT_SWINT = 117, /* 0x75 */ + VMEXIT_INVD = 118, /* 0x76 */ + VMEXIT_PAUSE = 119, /* 0x77 */ + VMEXIT_HLT = 120, /* 0x78 */ + VMEXIT_INVLPG = 121, /* 0x79 */ + VMEXIT_INVLPGA = 122, /* 0x7a */ + VMEXIT_IOIO = 123, /* 0x7b */ + VMEXIT_MSR = 124, /* 0x7c */ + VMEXIT_TASK_SWITCH = 125, /* 0x7d */ + VMEXIT_FERR_FREEZE = 126, /* 0x7e */ + VMEXIT_SHUTDOWN = 127, /* 0x7f */ + VMEXIT_VMRUN = 128, /* 0x80 */ + VMEXIT_VMMCALL = 129, /* 0x81 */ + VMEXIT_VMLOAD = 130, /* 0x82 */ + VMEXIT_VMSAVE = 131, /* 0x83 */ + VMEXIT_STGI = 132, /* 0x84 */ + VMEXIT_CLGI = 133, /* 0x85 */ + VMEXIT_SKINIT = 134, /* 0x86 */ + VMEXIT_RDTSCP = 135, /* 0x87 */ + VMEXIT_ICEBP = 136, /* 0x88 */ + VMEXIT_WBINVD = 137, /* 0x89 */ + VMEXIT_MONITOR = 138, /* 0x8a */ + VMEXIT_MWAIT = 139, /* 0x8b */ + VMEXIT_MWAIT_CONDITIONAL= 140, /* 0x8c */ + VMEXIT_XSETBV = 141, /* 0x8d */ + VMEXIT_RDPRU = 142, /* 0x8e */ + VMEXIT_NPF = 1024, /* 0x400, nested paging fault */ + VMEXIT_INVALID = -1 +}; + +enum +{ + /* Available on all SVM-capable hardware. */ + TLB_CTRL_NO_FLUSH = 0, + TLB_CTRL_FLUSH_ALL = 1, + + /* Available with the FlushByASID feature. 
*/ + TLB_CTRL_FLUSH_ASID = 3, + TLB_CTRL_FLUSH_ASID_NONGLOBAL = 7, +}; + +typedef union +{ + struct + { + uint8_t vector; + uint8_t type:3; + bool ev:1; + uint32_t resvd1:19; + bool v:1; + uint32_t ec; + }; + uint64_t raw; +} intinfo_t; + +typedef union { + struct { + bool intr_shadow: 1; + bool guest_intr_mask:1; + }; + uint64_t raw; +} intstat_t; + +typedef union +{ + u64 bytes; + struct + { + u64 tpr: 8; + u64 irq: 1; + u64 vgif: 1; + u64 rsvd0: 6; + u64 prio: 4; + u64 ign_tpr: 1; + u64 rsvd1: 3; + u64 intr_masking: 1; + u64 vgif_enable: 1; + u64 rsvd2: 6; + u64 vector: 8; + u64 rsvd3: 24; + } fields; +} vintr_t; + +typedef union +{ + u64 bytes; + struct + { + u64 type: 1; + u64 rsv0: 1; + u64 str: 1; + u64 rep: 1; + u64 sz8: 1; + u64 sz16: 1; + u64 sz32: 1; + u64 rsv1: 9; + u64 port: 16; + } fields; +} ioio_info_t; + +typedef union +{ + u64 bytes; + struct + { + u64 lbr_enable:1; + u64 vloadsave_enable:1; + } fields; +} virt_ext_t; + +typedef union +{ + struct { + bool intercepts:1; /* 0: cr/dr/exception/general intercepts, + * pause_filter_count, tsc_offset */ + bool iopm:1; /* 1: iopm_base_pa, msrpm_base_pa */ + bool asid:1; /* 2: guest_asid */ + bool tpr:1; /* 3: vintr */ + bool np:1; /* 4: np_enable, h_cr3, g_pat */ + bool cr:1; /* 5: cr0, cr3, cr4, efer */ + bool dr:1; /* 6: dr6, dr7 */ + bool dt:1; /* 7: gdtr, idtr */ + bool seg:1; /* 8: cs, ds, es, ss, cpl */ + bool cr2:1; /* 9: cr2 */ + bool lbr:1; /* 10: debugctlmsr, last{branch,int}{to,from}ip */ + bool :1; + bool cet:1; /* 12: msr_s_set, ssp, msr_isst */ + }; + uint32_t raw; +} vmcbcleanbits_t; + +#define IOPM_SIZE (12 * 1024) +#define MSRPM_SIZE (8 * 1024) + +struct vmcb_struct { + u32 _cr_intercepts; /* offset 0x00 - cleanbit 0 */ + u32 _dr_intercepts; /* offset 0x04 - cleanbit 0 */ + u32 _exception_intercepts; /* offset 0x08 - cleanbit 0 */ + u32 _general1_intercepts; /* offset 0x0C - cleanbit 0 */ + u32 _general2_intercepts; /* offset 0x10 - cleanbit 0 */ + u32 res01[10]; + u16 _pause_filter_thresh; /* offset 0x3C - cleanbit 0 */ + u16 _pause_filter_count; /* offset 0x3E - cleanbit 0 */ + u64 _iopm_base_pa; /* offset 0x40 - cleanbit 1 */ + u64 _msrpm_base_pa; /* offset 0x48 - cleanbit 1 */ + u64 _tsc_offset; /* offset 0x50 - cleanbit 0 */ + u32 _guest_asid; /* offset 0x58 - cleanbit 2 */ + u8 tlb_control; /* offset 0x5C - TLB_CTRL_* */ + u8 res07[3]; + vintr_t _vintr; /* offset 0x60 - cleanbit 3 */ + intstat_t int_stat; /* offset 0x68 */ + u64 exitcode; /* offset 0x70 */ + union { + struct { + uint64_t exitinfo1; /* offset 0x78 */ + uint64_t exitinfo2; /* offset 0x80 */ + }; + union { + struct { + uint16_t sel; + uint64_t :48; + + uint32_t ec; + uint32_t :4; + bool iret:1; + uint32_t :1; + bool jmp:1; + uint32_t :5; + bool ev:1; + uint32_t :3; + bool rf:1; + } task_switch; + } ei; + }; + intinfo_t exit_int_info; /* offset 0x88 */ + union { /* offset 0x90 - cleanbit 4 */ + struct { + bool _np_enable :1; + bool _sev_enable :1; + bool _sev_es_enable :1; + bool _gmet :1; + bool _np_sss :1; + bool _vte :1; + }; + uint64_t _np_ctrl; + }; + u64 res08[2]; + intinfo_t event_inj; /* offset 0xA8 */ + u64 _h_cr3; /* offset 0xB0 - cleanbit 4 */ + virt_ext_t virt_ext; /* offset 0xB8 */ + vmcbcleanbits_t cleanbits; /* offset 0xC0 */ + u32 res09; /* offset 0xC4 */ + u64 nextrip; /* offset 0xC8 */ + u8 guest_ins_len; /* offset 0xD0 */ + u8 guest_ins[15]; /* offset 0xD1 */ + u64 res10a[100]; /* offset 0xE0 pad to save area */ + + union { + struct segment_register sreg[6]; + struct { + struct segment_register es; /* offset 0x400 - cleanbit 
8 */
+            struct segment_register cs;   /*           cleanbit 8 */
+            struct segment_register ss;   /*           cleanbit 8 */
+            struct segment_register ds;   /*           cleanbit 8 */
+            struct segment_register fs;
+            struct segment_register gs;
+        };
+    };
+    struct segment_register gdtr;         /*           cleanbit 7 */
+    struct segment_register ldtr;
+    struct segment_register idtr;         /*           cleanbit 7 */
+    struct segment_register tr;
+    u64 res10[5];
+    u8 res11[3];
+    u8 _cpl;                              /*           cleanbit 8 */
+    u32 res12;
+    u64 _efer;                    /* offset 0x400 + 0xD0  - cleanbit 5 */
+    u64 res13[14];
+    u64 _cr4;                     /* offset 0x400 + 0x148 - cleanbit 5 */
+    u64 _cr3;                     /*           cleanbit 5 */
+    u64 _cr0;                     /*           cleanbit 5 */
+    u64 _dr7;                     /*           cleanbit 6 */
+    u64 _dr6;                     /*           cleanbit 6 */
+    u64 rflags;
+    u64 rip;
+    u64 res14[11];
+    u64 rsp;
+    u64 _msr_s_cet;               /* offset 0x400 + 0x1E0 - cleanbit 12 */
+    u64 _ssp;                     /* offset 0x400 + 0x1E8   |           */
+    u64 _msr_isst;                /* offset 0x400 + 0x1F0   v           */
+    u64 rax;
+    u64 star;
+    u64 lstar;
+    u64 cstar;
+    u64 sfmask;
+    u64 kerngsbase;
+    u64 sysenter_cs;
+    u64 sysenter_esp;
+    u64 sysenter_eip;
+    u64 _cr2;                     /*           cleanbit 9 */
+    u64 res16[4];
+    u64 _g_pat;                   /*           cleanbit 4 */
+    u64 _debugctlmsr;             /*           cleanbit 10 */
+    u64 _lastbranchfromip;        /*           cleanbit 10 */
+    u64 _lastbranchtoip;          /*           cleanbit 10 */
+    u64 _lastintfromip;           /*           cleanbit 10 */
+    u64 _lastinttoip;             /*           cleanbit 10 */
+    u64 res17[9];
+    u64 spec_ctrl;
+    u64 res18[291];
+};
+
+struct svm_domain {
+    /* OSVW MSRs */
+    union {
+        uint64_t raw[2];
+        struct {
+            uint64_t length;
+            uint64_t status;
+        };
+    } osvw;
+};
+
+/*
+ * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state.
+ * Therefore, guest state is in the hardware registers when servicing a
+ * VMExit.
+ *
+ * Immediately after a VMExit, the vmcb is stale, and needs to be brought
+ * into sync by VMSAVE. If state in the vmcb is modified, a VMLOAD is
+ * needed before the following VMRUN.
+ */
+enum vmcb_sync_state {
+    vmcb_in_sync,
+    vmcb_needs_vmsave,    /* VMCB out of sync (VMSAVE needed)? */
+    vmcb_needs_vmload     /* VMCB dirty (VMLOAD needed)? */
+};
+
+struct svm_vcpu {
+    struct vmcb_struct *vmcb;
+    u64    vmcb_pa;
+    unsigned long *msrpm;
+    int    launch_core;
+
+    uint8_t vmcb_sync_state; /* enum vmcb_sync_state */
+
+    /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
+    uint8_t cached_insn_len; /* Zero if no cached instruction. */
+
+    /*
+     * The upper four bytes of these fields are undefined in the VMCB, so we
+     * can't use the VMCB fields themselves. Writing a 64-bit value and then
+     * reading it back is fine unless a VMRUN/VMEXIT happens in between,
+     * which clears the upper four bytes.
+     */
+    uint64_t guest_sysenter_cs;
+    uint64_t guest_sysenter_esp;
+    uint64_t guest_sysenter_eip;
+};
+
+struct vmcb_struct *alloc_vmcb(void);
+void free_vmcb(struct vmcb_struct *vmcb);
+
+int svm_create_vmcb(struct vcpu *v);
+void svm_destroy_vmcb(struct vcpu *v);
+
+void setup_vmcb_dump(void);
+
+#define MSR_INTERCEPT_NONE    0
+#define MSR_INTERCEPT_READ    1
+#define MSR_INTERCEPT_WRITE   2
+#define MSR_INTERCEPT_RW      (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
+#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
+#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)
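/*
 * Illustration, not part of the patch: state covered by a clean bit must be
 * changed via the generated vmcb_set_*() helpers defined just below, e.g.
 *
 *   vmcb_set_exception_intercepts(vmcb,
 *       vmcb_get_exception_intercepts(vmcb) | (1U << TRAP_int3));
 *
 * The setter clears cleanbits.intercepts, so the processor re-reads the
 * intercept settings from memory on the next VMRUN instead of relying on
 * its cached copy.
 */

+/*
+ * VMCB accessor functions.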
+ */
+
+#define VMCB_ACCESSORS_(name, type, cleanbit) \
+static inline void \
+vmcb_set_ ## name(struct vmcb_struct *vmcb, \
+                  type value) \
+{ \
+    vmcb->_ ## name = value; \
+    vmcb->cleanbits.cleanbit = false; \
+} \
+static inline type \
+vmcb_get_ ## name(const struct vmcb_struct *vmcb) \
+{ \
+    return vmcb->_ ## name; \
+}
+
+#define VMCB_ACCESSORS(name, cleanbit) \
+    VMCB_ACCESSORS_(name, typeof(alloc_vmcb()->_ ## name), cleanbit)
+
+VMCB_ACCESSORS(cr_intercepts, intercepts)
+VMCB_ACCESSORS(dr_intercepts, intercepts)
+VMCB_ACCESSORS(exception_intercepts, intercepts)
+VMCB_ACCESSORS(general1_intercepts, intercepts)
+VMCB_ACCESSORS(general2_intercepts, intercepts)
+VMCB_ACCESSORS(pause_filter_count, intercepts)
+VMCB_ACCESSORS(pause_filter_thresh, intercepts)
+VMCB_ACCESSORS(tsc_offset, intercepts)
+VMCB_ACCESSORS(iopm_base_pa, iopm)
+VMCB_ACCESSORS(msrpm_base_pa, iopm)
+VMCB_ACCESSORS(guest_asid, asid)
+VMCB_ACCESSORS(vintr, tpr)
+VMCB_ACCESSORS(np_ctrl, np)
+VMCB_ACCESSORS_(np_enable, bool, np)
+VMCB_ACCESSORS_(sev_enable, bool, np)
+VMCB_ACCESSORS_(sev_es_enable, bool, np)
+VMCB_ACCESSORS_(gmet, bool, np)
+VMCB_ACCESSORS_(vte, bool, np)
+VMCB_ACCESSORS(h_cr3, np)
+VMCB_ACCESSORS(g_pat, np)
+VMCB_ACCESSORS(cr0, cr)
+VMCB_ACCESSORS(cr3, cr)
+VMCB_ACCESSORS(cr4, cr)
+VMCB_ACCESSORS(efer, cr)
+VMCB_ACCESSORS(dr6, dr)
+VMCB_ACCESSORS(dr7, dr)
+VMCB_ACCESSORS(cpl, seg)
+VMCB_ACCESSORS(cr2, cr2)
+VMCB_ACCESSORS(debugctlmsr, lbr)
+VMCB_ACCESSORS(lastbranchfromip, lbr)
+VMCB_ACCESSORS(lastbranchtoip, lbr)
+VMCB_ACCESSORS(lastintfromip, lbr)
+VMCB_ACCESSORS(lastinttoip, lbr)
+VMCB_ACCESSORS(msr_s_cet, cet)
+VMCB_ACCESSORS(ssp, cet)
+VMCB_ACCESSORS(msr_isst, cet)
+
+#undef VMCB_ACCESSORS
+
+#endif /* ASM_X86_HVM_SVM_VMCB_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/include/asm/hvm/trace.h b/xen/arch/x86/include/asm/hvm/trace.h
new file mode 100644
index 0000000000..145b59f6ac
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/trace.h
@@ -0,0 +1,114 @@
+#ifndef __ASM_X86_HVM_TRACE_H__
+#define __ASM_X86_HVM_TRACE_H__
+
+#include 
+
+#define DEFAULT_HVM_TRACE_ON 1
+#define DEFAULT_HVM_TRACE_OFF 0
+
+#define DEFAULT_HVM_VMSWITCH DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_PF DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INJECT DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_IO DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_REGACCESS DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_MISC DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INTR DEFAULT_HVM_TRACE_ON
+
+#define DO_TRC_HVM_VMENTRY DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT64 DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_PF_XEN DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_XEN64 DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT64 DEFAULT_HVM_PF
+#define DO_TRC_HVM_INJ_EXC DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_INJ_VIRQ DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_REINJ_VIRQ DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_INTR_WINDOW DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_IO_READ DEFAULT_HVM_IO
+#define DO_TRC_HVM_IO_WRITE DEFAULT_HVM_IO
+#define DO_TRC_HVM_CR_READ DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_READ64 DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE64 DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_READ DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_WRITE DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_XCR_READ64
DEFAULT_HVM_REGACCESS +#define DO_TRC_HVM_XCR_WRITE64 DEFAULT_HVM_REGACCESS +#define DO_TRC_HVM_MSR_READ DEFAULT_HVM_REGACCESS +#define DO_TRC_HVM_MSR_WRITE DEFAULT_HVM_REGACCESS +#define DO_TRC_HVM_RDTSC DEFAULT_HVM_REGACCESS +#define DO_TRC_HVM_CPUID DEFAULT_HVM_MISC +#define DO_TRC_HVM_INTR DEFAULT_HVM_INTR +#define DO_TRC_HVM_NMI DEFAULT_HVM_INTR +#define DO_TRC_HVM_MCE DEFAULT_HVM_INTR +#define DO_TRC_HVM_SMI DEFAULT_HVM_INTR +#define DO_TRC_HVM_VMMCALL DEFAULT_HVM_MISC +#define DO_TRC_HVM_HLT DEFAULT_HVM_MISC +#define DO_TRC_HVM_INVLPG DEFAULT_HVM_MISC +#define DO_TRC_HVM_INVLPG64 DEFAULT_HVM_MISC +#define DO_TRC_HVM_IO_ASSIST DEFAULT_HVM_MISC +#define DO_TRC_HVM_MMIO_ASSIST DEFAULT_HVM_MISC +#define DO_TRC_HVM_CLTS DEFAULT_HVM_MISC +#define DO_TRC_HVM_LMSW DEFAULT_HVM_MISC +#define DO_TRC_HVM_LMSW64 DEFAULT_HVM_MISC +#define DO_TRC_HVM_REALMODE_EMULATE DEFAULT_HVM_MISC +#define DO_TRC_HVM_TRAP DEFAULT_HVM_MISC +#define DO_TRC_HVM_TRAP_DEBUG DEFAULT_HVM_MISC +#define DO_TRC_HVM_VLAPIC DEFAULT_HVM_MISC + + +#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32) + +#define TRACE_2_LONG_2D(_e, d1, d2, ...) \ + TRACE_4D(_e, d1, d2) +#define TRACE_2_LONG_3D(_e, d1, d2, d3, ...) \ + TRACE_5D(_e, d1, d2, d3) +#define TRACE_2_LONG_4D(_e, d1, d2, d3, d4, ...) \ + TRACE_6D(_e, d1, d2, d3, d4) + +#define HVMTRACE_ND(evt, modifier, cycles, ...) \ + do { \ + if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \ + { \ + uint32_t _d[] = { __VA_ARGS__ }; \ + __trace_var(TRC_HVM_ ## evt | (modifier), cycles, \ + sizeof(_d), sizeof(_d) ? _d : NULL); \ + } \ + } while(0) + +#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \ + HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4, d5, d6) +#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \ + HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4, d5) +#define HVMTRACE_4D(evt, d1, d2, d3, d4) \ + HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4) +#define HVMTRACE_3D(evt, d1, d2, d3) \ + HVMTRACE_ND(evt, 0, 0, d1, d2, d3) +#define HVMTRACE_2D(evt, d1, d2) \ + HVMTRACE_ND(evt, 0, 0, d1, d2) +#define HVMTRACE_1D(evt, d1) \ + HVMTRACE_ND(evt, 0, 0, d1) +#define HVMTRACE_0D(evt) \ + HVMTRACE_ND(evt, 0, 0) + +#define HVMTRACE_LONG_1D(evt, d1) \ + HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32) +#define HVMTRACE_LONG_2D(evt, d1, d2, ...) \ + HVMTRACE_3D(evt ## 64, d1, d2) +#define HVMTRACE_LONG_3D(evt, d1, d2, d3, ...) \ + HVMTRACE_4D(evt ## 64, d1, d2, d3) +#define HVMTRACE_LONG_4D(evt, d1, d2, d3, d4, ...) \ + HVMTRACE_5D(evt ## 64, d1, d2, d3, d4) + +#endif /* __ASM_X86_HVM_TRACE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h new file mode 100644 index 0000000000..8adf4555c2 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vcpu.h @@ -0,0 +1,210 @@ +/* + * vcpu.h: HVM per vcpu definitions + * + * Copyright (c) 2005, International Business Machines Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see .
+ */
+
+#ifndef __ASM_X86_HVM_VCPU_H__
+#define __ASM_X86_HVM_VCPU_H__
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+struct hvm_vcpu_asid {
+    uint64_t generation;
+    uint32_t asid;
+};
+
+/*
+ * We may read or write up to m512 as a number of device-model
+ * transactions.
+ */
+struct hvm_mmio_cache {
+    unsigned long gla;
+    unsigned int size;
+    uint8_t dir;
+    uint8_t buffer[64] __aligned(sizeof(long));
+};
+
+struct hvm_vcpu_io {
+    /*
+     * HVM emulation:
+     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid for accesses as per @mmio_access.
+     */
+    struct npfec mmio_access;
+    unsigned long mmio_gla;
+    unsigned long mmio_gpfn;
+
+    /*
+     * We may need to handle up to 3 distinct memory accesses per
+     * instruction.
+     */
+    struct hvm_mmio_cache mmio_cache[3];
+    unsigned int mmio_cache_count;
+
+    /* For retries we shouldn't re-fetch the instruction. */
+    unsigned int mmio_insn_bytes;
+    unsigned char mmio_insn[16];
+    struct hvmemul_cache *cache;
+
+    /*
+     * For string instruction emulation we need to be able to signal a
+     * necessary retry through other than function return codes.
+     */
+    bool_t mmio_retry;
+
+    unsigned long msix_unmask_address;
+    unsigned long msix_snoop_address;
+    unsigned long msix_snoop_gpa;
+
+    const struct g2m_ioport *g2m_ioport;
+};
+
+struct nestedvcpu {
+    bool_t nv_guestmode; /* vcpu in guestmode? */
+    void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */
+    void *nv_n1vmcx; /* VMCB/VMCS used to run l1 guest */
+    void *nv_n2vmcx; /* shadow VMCB/VMCS used to run l2 guest */
+
+    uint64_t nv_vvmcxaddr; /* l1 guest physical address of nv_vvmcx */
+    paddr_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */
+    paddr_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */
+
+    /* SVM/VMX arch specific */
+    union {
+        struct nestedsvm nsvm;
+        struct nestedvmx nvmx;
+    } u;
+
+    bool_t nv_flushp2m; /* True, when p2m table must be flushed */
+    struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
+    bool stale_np2m; /* True when p2m_base in VMCx02 is no longer valid */
+    uint64_t np2m_generation;
+
+    struct hvm_vcpu_asid nv_n2asid;
+
+    bool_t nv_vmentry_pending;
+    bool_t nv_vmexit_pending;
+    bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
+
+    /* Does the l1 guest intercept io ports 0x80 and/or 0xED?
+     * Useful to optimize io permission handling.
+     */
+    bool_t nv_ioport80;
+    bool_t nv_ioportED;
+
+    /* L2's control-registers, just as the L2 sees them. */
+    unsigned long guest_cr[5];
+};
+
+#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu)
+
+struct altp2mvcpu {
+    /*
+     * #VE information page. This pointer being non-NULL indicates that a
+     * VMCS's VIRT_EXCEPTION_INFO field is pointing to the page, and an extra
+     * page reference is held.
+     */
+    struct page_info *veinfo_pg;
+    uint16_t p2midx; /* alternate p2m index */
+};
+
+#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)
+
+struct hvm_vcpu {
+    /* Guest control-register and EFER values, just as the guest sees them. */
+    unsigned long guest_cr[5];
+    unsigned long guest_efer;
+
+    /*
+     * Processor-visible control-register values, while guest executes.
+     *  CR0, CR4: Used as a cache of VMCS contents by VMX only.
+     *  CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
+     *  CR3: Always used and kept up to date by paging subsystem.
+ */ + unsigned long hw_cr[5]; + + struct vlapic vlapic; + s64 cache_tsc_offset; + u64 guest_time; + + /* Lock and list for virtual platform timers. */ + spinlock_t tm_lock; + struct list_head tm_list; + + bool flag_dr_dirty; + bool debug_state_latch; + bool single_step; + struct { + bool enabled; + uint16_t p2midx; + } fast_single_step; + + /* (MFN) hypervisor page table */ + pagetable_t monitor_table; + + struct hvm_vcpu_asid n1asid; + + u64 msr_tsc_adjust; + + union { + struct vmx_vcpu vmx; + struct svm_vcpu svm; + }; + + struct tasklet assert_evtchn_irq_tasklet; + + struct nestedvcpu nvcpu; + + struct altp2mvcpu avcpu; + + struct mtrr_state mtrr; + u64 pat_cr; + + /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */ + int64_t stime_offset; + + u8 evtchn_upcall_vector; + + /* Which cache mode is this VCPU in (CR0:CD/NW)? */ + u8 cache_mode; + + struct hvm_vcpu_io hvm_io; + + /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */ + struct x86_event inject_event; + + struct viridian_vcpu *viridian; +}; + +#endif /* __ASM_X86_HVM_VCPU_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/vioapic.h b/xen/arch/x86/include/asm/hvm/vioapic.h new file mode 100644 index 0000000000..36b64d20d6 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vioapic.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2001 MandrakeSoft S.A. + * + * MandrakeSoft S.A. + * 43, rue d'Aboukir + * 75002 Paris - France + * http://www.linux-mandrake.com/ + * http://www.mandrakesoft.com/ + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; If not, see . + */ + +#ifndef __ASM_X86_HVM_VIOAPIC_H__ +#define __ASM_X86_HVM_VIOAPIC_H__ + +#include +#include + +#define VIOAPIC_VERSION_ID 0x11 /* IOAPIC version */ + +#define VIOAPIC_EDGE_TRIG 0 +#define VIOAPIC_LEVEL_TRIG 1 + +#define VIOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000 +#define VIOAPIC_MEM_LENGTH 0x100 + +/* Direct registers. */ +#define VIOAPIC_REG_SELECT 0x00 +#define VIOAPIC_REG_WINDOW 0x10 +#define VIOAPIC_REG_EOI 0x40 + +/* Indirect registers. 
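+ * These are not mapped directly: software writes a register index to
+ * VIOAPIC_REG_SELECT and then reads or writes that register's value
+ * through VIOAPIC_REG_WINDOW.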
*/ +#define VIOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ +#define VIOAPIC_REG_VERSION 0x01 +#define VIOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ +#define VIOAPIC_REG_RTE0 0x10 + +struct hvm_vioapic { + struct domain *domain; + uint32_t nr_pins; + unsigned int base_gsi; + union { + XEN_HVM_VIOAPIC(,); + struct hvm_hw_vioapic domU; + }; +}; + +#define domain_vioapic(d, i) ((d)->arch.hvm.vioapic[i]) +#define vioapic_domain(v) ((v)->domain) + +int vioapic_init(struct domain *d); +void vioapic_deinit(struct domain *d); +void vioapic_reset(struct domain *d); +void vioapic_irq_positive_edge(struct domain *d, unsigned int irq); +void vioapic_update_EOI(struct domain *d, u8 vector); + +int vioapic_get_mask(const struct domain *d, unsigned int gsi); +int vioapic_get_vector(const struct domain *d, unsigned int gsi); +int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi); + +#endif /* __ASM_X86_HVM_VIOAPIC_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/viridian.h b/xen/arch/x86/include/asm/hvm/viridian.h new file mode 100644 index 0000000000..4c8ff6e80b --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/viridian.h @@ -0,0 +1,112 @@ +/***************************************************************************** + * + * include/xen/viridian.h + * + * Copyright (c) 2008 Citrix Corp. + * + */ + +#ifndef __ASM_X86_HVM_VIRIDIAN_H__ +#define __ASM_X86_HVM_VIRIDIAN_H__ + +#include + +struct viridian_page +{ + union hv_vp_assist_page_msr msr; + void *ptr; +}; + +struct viridian_stimer { + struct vcpu *v; + struct timer timer; + union hv_stimer_config config; + uint64_t count; + uint64_t expiration; + bool started; +}; + +struct viridian_vcpu +{ + struct viridian_page vp_assist; + bool apic_assist_pending; + bool polled; + uint64_t scontrol; + uint64_t siefp; + struct viridian_page simp; + union hv_synic_sint sint[16]; + uint8_t vector_to_sintx[256]; + struct viridian_stimer stimer[4]; + unsigned int stimer_enabled; + unsigned int stimer_pending; + uint64_t crash_param[5]; +}; + +struct viridian_time_ref_count +{ + unsigned long flags; + +#define _TRC_accessed 0 +#define TRC_accessed (1 << _TRC_accessed) +#define _TRC_running 1 +#define TRC_running (1 << _TRC_running) + + uint64_t val; + int64_t off; +}; + +enum { + _HCALL_spin_wait, + _HCALL_flush, + _HCALL_flush_ex, + _HCALL_ipi, + _HCALL_ipi_ex, + _HCALL_nr /* must be last */ +}; + +struct viridian_domain +{ + union hv_guest_os_id guest_os_id; + union hv_vp_assist_page_msr hypercall_gpa; + DECLARE_BITMAP(hypercall_flags, _HCALL_nr); + struct viridian_time_ref_count time_ref_count; + struct viridian_page reference_tsc; +}; + +void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, + uint32_t subleaf, struct cpuid_leaf *res); + +int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val); +int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val); + +int +viridian_hypercall(struct cpu_user_regs *regs); + +void viridian_time_domain_freeze(const struct domain *d); +void viridian_time_domain_thaw(const struct domain *d); + +int viridian_vcpu_init(struct vcpu *v); +int viridian_domain_init(struct domain *d); + +void viridian_vcpu_deinit(struct vcpu *v); +void viridian_domain_deinit(struct domain *d); + +void viridian_apic_assist_set(const struct vcpu *v); +bool viridian_apic_assist_completed(const struct vcpu *v); +void viridian_apic_assist_clear(const struct vcpu *v); + +void viridian_synic_poll(struct vcpu *v); +bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v, + unsigned int vector); + +#endif /* 
__ASM_X86_HVM_VIRIDIAN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/vlapic.h b/xen/arch/x86/include/asm/hvm/vlapic.h new file mode 100644 index 0000000000..8f908928c3 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vlapic.h @@ -0,0 +1,157 @@ +/* + * hvm_vlapic.h: virtualize LAPIC definitions. + * + * Copyright (c) 2004, Intel Corporation. + * Copyright (c) 2006 Keir Fraser, XenSource Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_HVM_VLAPIC_H__ +#define __ASM_X86_HVM_VLAPIC_H__ + +#include +#include + +#define vcpu_vlapic(x) (&(x)->arch.hvm.vlapic) +#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm.vlapic)) +#define const_vlapic_vcpu(x) (container_of((x), const struct vcpu, \ + arch.hvm.vlapic)) +#define vlapic_domain(x) (vlapic_vcpu(x)->domain) + +#define _VLAPIC_ID(vlapic, id) (vlapic_x2apic_mode(vlapic) \ + ? (id) : GET_xAPIC_ID(id)) +#define VLAPIC_ID(vlapic) _VLAPIC_ID(vlapic, vlapic_get_reg(vlapic, APIC_ID)) + +/* + * APIC can be disabled in two ways: + * 1. 'Hardware disable': via IA32_APIC_BASE_MSR[11] + * CPU should behave as if it does not have an APIC. + * 2. 'Software disable': via APIC_SPIV[8]. + * APIC is visible but does not respond to interrupt messages. + */ +#define VLAPIC_HW_DISABLED 0x1 +#define VLAPIC_SW_DISABLED 0x2 +#define vlapic_sw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_SW_DISABLED) +#define vlapic_hw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_HW_DISABLED) +#define vlapic_disabled(vlapic) ((vlapic)->hw.disabled) +#define vlapic_enabled(vlapic) (!vlapic_disabled(vlapic)) + +#define vlapic_base_address(vlapic) \ + ((vlapic)->hw.apic_base_msr & APIC_BASE_ADDR_MASK) +/* Only check EXTD bit as EXTD can't be set if it is disabled by hardware */ +#define vlapic_x2apic_mode(vlapic) \ + ((vlapic)->hw.apic_base_msr & APIC_BASE_EXTD) +#define vlapic_xapic_mode(vlapic) \ + (!vlapic_hw_disabled(vlapic) && \ + !((vlapic)->hw.apic_base_msr & APIC_BASE_EXTD)) + +/* + * Generic APIC bitmap vector update & search routines. 
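+ *
+ * The 256-bit IRR/ISR/TMR registers are laid out as eight 32-bit words at
+ * 16-byte strides, hence VEC_POS()/REG_POS() splitting a vector into a bit
+ * position and a register offset. An illustrative use, mirroring how
+ * vlapic.c latches an interrupt:
+ *
+ *     vlapic_set_vector(vec, &vlapic->regs->data[APIC_IRR]);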
+ */
+
+#define VEC_POS(v) ((v) % 32)
+#define REG_POS(v) (((v) / 32) * 0x10)
+#define vlapic_test_vector(vec, bitmap) \
+    test_bit(VEC_POS(vec), (const uint32_t *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_set_vector(vec, bitmap) \
+    test_and_set_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_clear_vector(vec, bitmap) \
+    test_and_clear_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
+#define vlapic_set_vector(vec, bitmap) \
+    set_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
+#define vlapic_clear_vector(vec, bitmap) \
+    clear_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
+
+struct vlapic {
+    struct hvm_hw_lapic hw;
+    struct hvm_hw_lapic_regs *regs;
+    struct {
+        bool_t hw, regs;
+        uint32_t id, ldr;
+    } loaded;
+    spinlock_t esr_lock;
+    struct periodic_time pt;
+    s_time_t timer_last_update;
+    struct page_info *regs_page;
+    /* INIT-SIPI-SIPI work gets deferred to a tasklet. */
+    struct {
+        uint32_t icr, dest;
+        struct tasklet tasklet;
+    } init_sipi;
+};
+
+/* vlapic's frequency is 100 MHz */
+#define APIC_BUS_CYCLE_NS 10
+
+static inline uint32_t vlapic_get_reg(const struct vlapic *vlapic,
+                                      uint32_t reg)
+{
+    return *((uint32_t *)(&vlapic->regs->data[reg]));
+}
+
+static inline void vlapic_set_reg(
+    struct vlapic *vlapic, uint32_t reg, uint32_t val)
+{
+    *((uint32_t *)(&vlapic->regs->data[reg])) = val;
+}
+
+void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val);
+
+bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic);
+
+bool vlapic_test_irq(const struct vlapic *vlapic, uint8_t vec);
+void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
+
+int vlapic_has_pending_irq(struct vcpu *v);
+int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack);
+
+int vlapic_init(struct vcpu *v);
+void vlapic_destroy(struct vcpu *v);
+
+void vlapic_reset(struct vlapic *vlapic);
+
+int guest_wrmsr_apic_base(struct vcpu *v, uint64_t val);
+int guest_rdmsr_x2apic(const struct vcpu *v, uint32_t msr, uint64_t *val);
+int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t val);
+
+void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value);
+uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic);
+
+int vlapic_accept_pic_intr(struct vcpu *v);
+uint32_t vlapic_set_ppr(struct vlapic *vlapic);
+
+void vlapic_adjust_i8259_target(struct domain *d);
+
+void vlapic_EOI_set(struct vlapic *vlapic);
+void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector);
+
+void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high);
+
+int vlapic_apicv_write(struct vcpu *v, unsigned int offset);
+
+struct vlapic *vlapic_lowest_prio(
+    struct domain *d, const struct vlapic *source,
+    int short_hand, uint32_t dest, bool_t dest_mode);
+
+bool_t vlapic_match_dest(
+    const struct vlapic *target, const struct vlapic *source,
+    int short_hand, uint32_t dest, bool_t dest_mode);
+
+static inline void vlapic_sync_pir_to_irr(struct vcpu *v)
+{
+    if ( hvm_funcs.sync_pir_to_irr )
+        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
+}
+
+#endif /* __ASM_X86_HVM_VLAPIC_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/vm_event.h b/xen/arch/x86/include/asm/hvm/vm_event.h
new file mode 100644
index 0000000000..28cb07ce8f
--- /dev/null
+++ b/xen/arch/x86/include/asm/hvm/vm_event.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-x86/hvm/vm_event.h
+ *
+ * Hardware virtual machine vm_event abstractions.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_HVM_VM_EVENT_H__ +#define __ASM_X86_HVM_VM_EVENT_H__ + +void hvm_vm_event_do_resume(struct vcpu *v); + +#endif /* __ASM_X86_HVM_VM_EVENT_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h new file mode 100644 index 0000000000..03c9ccf627 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h @@ -0,0 +1,688 @@ +/* + * vmcs.h: VMCS related definitions + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + * + */ +#ifndef __ASM_X86_HVM_VMX_VMCS_H__ +#define __ASM_X86_HVM_VMX_VMCS_H__ + +#include + +extern void vmcs_dump_vcpu(struct vcpu *v); +extern int vmx_vmcs_init(void); +extern int vmx_cpu_up_prepare(unsigned int cpu); +extern void vmx_cpu_dead(unsigned int cpu); +extern int vmx_cpu_up(void); +extern void vmx_cpu_down(void); + +struct vmcs_struct { + u32 vmcs_revision_id; + unsigned char data [0]; /* vmcs size is read from MSR */ +}; + +struct vmx_msr_entry { + u32 index; + u32 mbz; + u64 data; +}; + +#define EPT_DEFAULT_MT MTRR_TYPE_WRBACK + +struct ept_data { + union { + struct { + uint64_t mt:3, /* Memory Type. */ + wl:3, /* Walk length -1. */ + ad:1, /* Enable EPT A/D bits. */ + :5, /* rsvd. */ + mfn:52; + }; + u64 eptp; + }; + /* Set of PCPUs needing an INVEPT before a VMENTER. */ + cpumask_var_t invalidate; +}; + +#define _VMX_DOMAIN_PML_ENABLED 0 +#define VMX_DOMAIN_PML_ENABLED (1ul << _VMX_DOMAIN_PML_ENABLED) +struct vmx_domain { + /* VMX_DOMAIN_* */ + unsigned int status; + + /* + * Domain permitted to use Executable EPT Superpages? Cleared to work + * around CVE-2018-12207 as appropriate. 
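+     * (CVE-2018-12207, a.k.a. iTLB multihit, is triggered by instruction
+     * fetches from superpages, hence the option of withholding execute
+     * permission from superpage EPT entries.)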
+ */ + bool exec_sp; +}; + +/* + * Layout of the MSR bitmap, as interpreted by hardware: + * - *_low covers MSRs 0 to 0x1fff + * - *_ligh covers MSRs 0xc0000000 to 0xc0001fff + */ +struct vmx_msr_bitmap { + unsigned long read_low [0x2000 / BITS_PER_LONG]; + unsigned long read_high [0x2000 / BITS_PER_LONG]; + unsigned long write_low [0x2000 / BITS_PER_LONG]; + unsigned long write_high[0x2000 / BITS_PER_LONG]; +}; + +struct pi_desc { + DECLARE_BITMAP(pir, X86_NR_VECTORS); + union { + struct { + u16 on : 1, /* bit 256 - Outstanding Notification */ + sn : 1, /* bit 257 - Suppress Notification */ + rsvd_1 : 14; /* bit 271:258 - Reserved */ + u8 nv; /* bit 279:272 - Notification Vector */ + u8 rsvd_2; /* bit 287:280 - Reserved */ + u32 ndst; /* bit 319:288 - Notification Destination */ + }; + u64 control; + }; + u32 rsvd[6]; +} __attribute__ ((aligned (64))); + +#define NR_PML_ENTRIES 512 + +struct pi_blocking_vcpu { + struct list_head list; + spinlock_t *lock; +}; + +struct vmx_vcpu { + /* Physical address of VMCS. */ + paddr_t vmcs_pa; + /* VMCS shadow machine address. */ + paddr_t vmcs_shadow_maddr; + + /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */ + spinlock_t vmcs_lock; + + /* + * Activation and launch status of this VMCS. + * - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR. + * - Launched on active CPU by VMLAUNCH when current VMCS. + */ + struct list_head active_list; + int active_cpu; + int launched; + + /* Cache of cpu execution control. */ + u32 exec_control; + u32 secondary_exec_control; + u32 exception_bitmap; + + uint64_t shadow_gs; + uint64_t star; + uint64_t lstar; + uint64_t cstar; + uint64_t sfmask; + + struct vmx_msr_bitmap *msr_bitmap; + + /* + * Most accesses to the MSR host/guest load/save lists are in current + * context. However, the data can be modified by toolstack/migration + * actions. Remote access is only permitted for paused vcpus, and is + * protected under the domctl lock. + */ + struct vmx_msr_entry *msr_area; + struct vmx_msr_entry *host_msr_area; + unsigned int msr_load_count; + unsigned int msr_save_count; + unsigned int host_msr_count; + + unsigned long eoi_exitmap_changed; + DECLARE_BITMAP(eoi_exit_bitmap, X86_NR_VECTORS); + struct pi_desc pi_desc; + + unsigned long host_cr0; + + /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */ + bool_t ept_spurious_misconfig; + + /* Processor Trace configured and enabled for the vcpu. */ + bool ipt_active; + + /* Is the guest in real mode? */ + uint8_t vmx_realmode; + /* Are we emulating rather than VMENTERing? */ + uint8_t vmx_emulate; + + uint8_t lbr_flags; + + /* Bitmask of segments that we can't safely use in virtual 8086 mode */ + uint16_t vm86_segment_mask; + /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */ + struct segment_register vm86_saved_seg[x86_seg_tr + 1]; + /* Remember EFLAGS while in virtual 8086 mode */ + uint32_t vm86_saved_eflags; + int hostenv_migrated; + + /* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */ + struct page_info *vmread_bitmap; + struct page_info *vmwrite_bitmap; + + struct page_info *pml_pg; + + /* Bitmask of trapped CR4 bits. */ + unsigned long cr4_host_mask; + + /* + * Before it is blocked, vCPU is added to the per-cpu list. + * VT-d engine can send wakeup notification event to the + * pCPU and wakeup the related vCPU. 
+ */ + struct pi_blocking_vcpu pi_blocking; +}; + +int vmx_create_vmcs(struct vcpu *v); +void vmx_destroy_vmcs(struct vcpu *v); +void vmx_vmcs_enter(struct vcpu *v); +bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v); +void vmx_vmcs_exit(struct vcpu *v); +void vmx_vmcs_reload(struct vcpu *v); + +#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 +#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 +#define CPU_BASED_HLT_EXITING 0x00000080 +#define CPU_BASED_INVLPG_EXITING 0x00000200 +#define CPU_BASED_MWAIT_EXITING 0x00000400 +#define CPU_BASED_RDPMC_EXITING 0x00000800 +#define CPU_BASED_RDTSC_EXITING 0x00001000 +#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 +#define CPU_BASED_CR3_STORE_EXITING 0x00010000 +#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 +#define CPU_BASED_CR8_STORE_EXITING 0x00100000 +#define CPU_BASED_TPR_SHADOW 0x00200000 +#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 +#define CPU_BASED_MOV_DR_EXITING 0x00800000 +#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 +#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000 +#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 +#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000 +#define CPU_BASED_MONITOR_EXITING 0x20000000 +#define CPU_BASED_PAUSE_EXITING 0x40000000 +#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 +extern u32 vmx_cpu_based_exec_control; + +#define PIN_BASED_EXT_INTR_MASK 0x00000001 +#define PIN_BASED_NMI_EXITING 0x00000008 +#define PIN_BASED_VIRTUAL_NMIS 0x00000020 +#define PIN_BASED_PREEMPT_TIMER 0x00000040 +#define PIN_BASED_POSTED_INTERRUPT 0x00000080 +extern u32 vmx_pin_based_exec_control; + +#define VM_EXIT_SAVE_DEBUG_CNTRLS 0x00000004 +#define VM_EXIT_IA32E_MODE 0x00000200 +#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL 0x00001000 +#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 +#define VM_EXIT_SAVE_GUEST_PAT 0x00040000 +#define VM_EXIT_LOAD_HOST_PAT 0x00080000 +#define VM_EXIT_SAVE_GUEST_EFER 0x00100000 +#define VM_EXIT_LOAD_HOST_EFER 0x00200000 +#define VM_EXIT_SAVE_PREEMPT_TIMER 0x00400000 +#define VM_EXIT_CLEAR_BNDCFGS 0x00800000 +extern u32 vmx_vmexit_control; + +#define VM_ENTRY_IA32E_MODE 0x00000200 +#define VM_ENTRY_SMM 0x00000400 +#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL 0x00002000 +#define VM_ENTRY_LOAD_GUEST_PAT 0x00004000 +#define VM_ENTRY_LOAD_GUEST_EFER 0x00008000 +#define VM_ENTRY_LOAD_BNDCFGS 0x00010000 +extern u32 vmx_vmentry_control; + +#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 +#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 +#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004 +#define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008 +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 +#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 +#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 +#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 +#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 +#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 +#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 +#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 +#define SECONDARY_EXEC_ENABLE_VM_FUNCTIONS 0x00002000 +#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING 0x00004000 +#define SECONDARY_EXEC_ENABLE_PML 0x00020000 +#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS 0x00040000 +#define SECONDARY_EXEC_XSAVES 0x00100000 +#define SECONDARY_EXEC_TSC_SCALING 0x02000000 +extern u32 vmx_secondary_exec_control; + +#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001 +#define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040 +#define VMX_EPT_MEMORY_TYPE_UC 0x00000100 +#define 
VMX_EPT_MEMORY_TYPE_WB 0x00004000 +#define VMX_EPT_SUPERPAGE_2MB 0x00010000 +#define VMX_EPT_SUPERPAGE_1GB 0x00020000 +#define VMX_EPT_INVEPT_INSTRUCTION 0x00100000 +#define VMX_EPT_AD_BIT 0x00200000 +#define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000 +#define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000 +#define VMX_VPID_INVVPID_INSTRUCTION 0x00100000000ULL +#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR 0x10000000000ULL +#define VMX_VPID_INVVPID_SINGLE_CONTEXT 0x20000000000ULL +#define VMX_VPID_INVVPID_ALL_CONTEXT 0x40000000000ULL +#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL +extern u64 vmx_ept_vpid_cap; + +#define VMX_MISC_PROC_TRACE 0x00004000 +#define VMX_MISC_CR3_TARGET 0x01ff0000 +#define VMX_MISC_VMWRITE_ALL 0x20000000 + +#define VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL + +#define cpu_has_wbinvd_exiting \ + (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING) +#define cpu_has_vmx_virtualize_apic_accesses \ + (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) +#define cpu_has_vmx_tpr_shadow \ + (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) +#define cpu_has_vmx_vnmi \ + (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS) +#define cpu_has_vmx_msr_bitmap \ + (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP) +#define cpu_has_vmx_secondary_exec_control \ + (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) +#define cpu_has_vmx_ept \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) +#define cpu_has_vmx_dt_exiting \ + (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING) +#define cpu_has_vmx_vpid \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) +#define cpu_has_monitor_trap_flag \ + (vmx_cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG) +#define cpu_has_vmx_pat \ + (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT) +#define cpu_has_vmx_efer \ + (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_EFER) +#define cpu_has_vmx_unrestricted_guest \ + (vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST) +#define vmx_unrestricted_guest(v) \ + ((v)->arch.hvm.vmx.secondary_exec_control & \ + SECONDARY_EXEC_UNRESTRICTED_GUEST) +#define cpu_has_vmx_ple \ + (vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) +#define cpu_has_vmx_apic_reg_virt \ + (vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT) +#define cpu_has_vmx_virtual_intr_delivery \ + (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) +#define cpu_has_vmx_virtualize_x2apic_mode \ + (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) +#define cpu_has_vmx_posted_intr_processing \ + (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) +#define cpu_has_vmx_vmcs_shadowing \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING) +#define cpu_has_vmx_vmfunc \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) +#define cpu_has_vmx_virt_exceptions \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS) +#define cpu_has_vmx_pml \ + (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML) +#define cpu_has_vmx_mpx \ + ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \ + (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS)) +#define cpu_has_vmx_xsaves \ + (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES) +#define cpu_has_vmx_tsc_scaling \ + (vmx_secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) + +#define VMCS_RID_TYPE_MASK 0x80000000 + +/* GUEST_INTERRUPTIBILITY_INFO flags. 
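+ *
+ * The STI and MOV-SS shadows block interrupt delivery for the single
+ * instruction following an STI or a MOV to SS; the SMI and NMI bits
+ * report blocking of SMIs and NMIs respectively.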
*/ +#define VMX_INTR_SHADOW_STI 0x00000001 +#define VMX_INTR_SHADOW_MOV_SS 0x00000002 +#define VMX_INTR_SHADOW_SMI 0x00000004 +#define VMX_INTR_SHADOW_NMI 0x00000008 + +#define VMX_BASIC_REVISION_MASK 0x7fffffff +#define VMX_BASIC_VMCS_SIZE_MASK (0x1fffULL << 32) +#define VMX_BASIC_32BIT_ADDRESSES (1ULL << 48) +#define VMX_BASIC_DUAL_MONITOR (1ULL << 49) +#define VMX_BASIC_MEMORY_TYPE_MASK (0xfULL << 50) +#define VMX_BASIC_INS_OUT_INFO (1ULL << 54) +/* + * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that + * default to 1 may be cleared to 0. + */ +#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55) + +extern u64 vmx_basic_msr; +#define cpu_has_vmx_ins_outs_instr_info \ + (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO)) + +/* Guest interrupt status */ +#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF +#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8 + +/* VMFUNC leaf definitions */ +#define VMX_VMFUNC_EPTP_SWITCHING (1ULL << 0) + +/* VMCS field encodings. */ +#define VMCS_HIGH(x) ((x) | 1) +enum vmcs_field { + VIRTUAL_PROCESSOR_ID = 0x00000000, + POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002, + EPTP_INDEX = 0x00000004, +#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */ + GUEST_ES_SELECTOR = 0x00000800, + GUEST_CS_SELECTOR = 0x00000802, + GUEST_SS_SELECTOR = 0x00000804, + GUEST_DS_SELECTOR = 0x00000806, + GUEST_FS_SELECTOR = 0x00000808, + GUEST_GS_SELECTOR = 0x0000080a, + GUEST_LDTR_SELECTOR = 0x0000080c, + GUEST_TR_SELECTOR = 0x0000080e, + GUEST_INTR_STATUS = 0x00000810, + GUEST_PML_INDEX = 0x00000812, + HOST_ES_SELECTOR = 0x00000c00, + HOST_CS_SELECTOR = 0x00000c02, + HOST_SS_SELECTOR = 0x00000c04, + HOST_DS_SELECTOR = 0x00000c06, + HOST_FS_SELECTOR = 0x00000c08, + HOST_GS_SELECTOR = 0x00000c0a, + HOST_TR_SELECTOR = 0x00000c0c, + IO_BITMAP_A = 0x00002000, + IO_BITMAP_B = 0x00002002, + MSR_BITMAP = 0x00002004, + VM_EXIT_MSR_STORE_ADDR = 0x00002006, + VM_EXIT_MSR_LOAD_ADDR = 0x00002008, + VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, + PML_ADDRESS = 0x0000200e, + TSC_OFFSET = 0x00002010, + VIRTUAL_APIC_PAGE_ADDR = 0x00002012, + APIC_ACCESS_ADDR = 0x00002014, + PI_DESC_ADDR = 0x00002016, + VM_FUNCTION_CONTROL = 0x00002018, + EPT_POINTER = 0x0000201a, + EOI_EXIT_BITMAP0 = 0x0000201c, +#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */ + EPTP_LIST_ADDR = 0x00002024, + VMREAD_BITMAP = 0x00002026, + VMWRITE_BITMAP = 0x00002028, + VIRT_EXCEPTION_INFO = 0x0000202a, + XSS_EXIT_BITMAP = 0x0000202c, + TSC_MULTIPLIER = 0x00002032, + GUEST_PHYSICAL_ADDRESS = 0x00002400, + VMCS_LINK_POINTER = 0x00002800, + GUEST_IA32_DEBUGCTL = 0x00002802, + GUEST_PAT = 0x00002804, + GUEST_EFER = 0x00002806, + GUEST_PERF_GLOBAL_CTRL = 0x00002808, + GUEST_PDPTE0 = 0x0000280a, +#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */ + GUEST_BNDCFGS = 0x00002812, + HOST_PAT = 0x00002c00, + HOST_EFER = 0x00002c02, + HOST_PERF_GLOBAL_CTRL = 0x00002c04, + PIN_BASED_VM_EXEC_CONTROL = 0x00004000, + CPU_BASED_VM_EXEC_CONTROL = 0x00004002, + EXCEPTION_BITMAP = 0x00004004, + PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, + PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, + CR3_TARGET_COUNT = 0x0000400a, + VM_EXIT_CONTROLS = 0x0000400c, + VM_EXIT_MSR_STORE_COUNT = 0x0000400e, + VM_EXIT_MSR_LOAD_COUNT = 0x00004010, + VM_ENTRY_CONTROLS = 0x00004012, + VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, + VM_ENTRY_INTR_INFO = 0x00004016, + VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, + VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, + TPR_THRESHOLD = 0x0000401c, + SECONDARY_VM_EXEC_CONTROL = 0x0000401e, + PLE_GAP = 
0x00004020, + PLE_WINDOW = 0x00004022, + VM_INSTRUCTION_ERROR = 0x00004400, + VM_EXIT_REASON = 0x00004402, + VM_EXIT_INTR_INFO = 0x00004404, + VM_EXIT_INTR_ERROR_CODE = 0x00004406, + IDT_VECTORING_INFO = 0x00004408, + IDT_VECTORING_ERROR_CODE = 0x0000440a, + VM_EXIT_INSTRUCTION_LEN = 0x0000440c, + VMX_INSTRUCTION_INFO = 0x0000440e, +#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */ + GUEST_ES_LIMIT = 0x00004800, + GUEST_CS_LIMIT = 0x00004802, + GUEST_SS_LIMIT = 0x00004804, + GUEST_DS_LIMIT = 0x00004806, + GUEST_FS_LIMIT = 0x00004808, + GUEST_GS_LIMIT = 0x0000480a, + GUEST_LDTR_LIMIT = 0x0000480c, + GUEST_TR_LIMIT = 0x0000480e, + GUEST_GDTR_LIMIT = 0x00004810, + GUEST_IDTR_LIMIT = 0x00004812, +#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */ + GUEST_ES_AR_BYTES = 0x00004814, + GUEST_CS_AR_BYTES = 0x00004816, + GUEST_SS_AR_BYTES = 0x00004818, + GUEST_DS_AR_BYTES = 0x0000481a, + GUEST_FS_AR_BYTES = 0x0000481c, + GUEST_GS_AR_BYTES = 0x0000481e, + GUEST_LDTR_AR_BYTES = 0x00004820, + GUEST_TR_AR_BYTES = 0x00004822, + GUEST_INTERRUPTIBILITY_INFO = 0x00004824, + GUEST_ACTIVITY_STATE = 0x00004826, + GUEST_SMBASE = 0x00004828, + GUEST_SYSENTER_CS = 0x0000482a, + GUEST_PREEMPTION_TIMER = 0x0000482e, + HOST_SYSENTER_CS = 0x00004c00, + CR0_GUEST_HOST_MASK = 0x00006000, + CR4_GUEST_HOST_MASK = 0x00006002, + CR0_READ_SHADOW = 0x00006004, + CR4_READ_SHADOW = 0x00006006, + CR3_TARGET_VALUE0 = 0x00006008, +#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */ + EXIT_QUALIFICATION = 0x00006400, + GUEST_LINEAR_ADDRESS = 0x0000640a, + GUEST_CR0 = 0x00006800, + GUEST_CR3 = 0x00006802, + GUEST_CR4 = 0x00006804, +#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */ + GUEST_ES_BASE = 0x00006806, + GUEST_CS_BASE = 0x00006808, + GUEST_SS_BASE = 0x0000680a, + GUEST_DS_BASE = 0x0000680c, + GUEST_FS_BASE = 0x0000680e, + GUEST_GS_BASE = 0x00006810, + GUEST_LDTR_BASE = 0x00006812, + GUEST_TR_BASE = 0x00006814, + GUEST_GDTR_BASE = 0x00006816, + GUEST_IDTR_BASE = 0x00006818, + GUEST_DR7 = 0x0000681a, + GUEST_RSP = 0x0000681c, + GUEST_RIP = 0x0000681e, + GUEST_RFLAGS = 0x00006820, + GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, + GUEST_SYSENTER_ESP = 0x00006824, + GUEST_SYSENTER_EIP = 0x00006826, + HOST_CR0 = 0x00006c00, + HOST_CR3 = 0x00006c02, + HOST_CR4 = 0x00006c04, + HOST_FS_BASE = 0x00006c06, + HOST_GS_BASE = 0x00006c08, + HOST_TR_BASE = 0x00006c0a, + HOST_GDTR_BASE = 0x00006c0c, + HOST_IDTR_BASE = 0x00006c0e, + HOST_SYSENTER_ESP = 0x00006c10, + HOST_SYSENTER_EIP = 0x00006c12, + HOST_RSP = 0x00006c14, + HOST_RIP = 0x00006c16, +}; + +#define VMCS_VPID_WIDTH 16 + +/* VM Instruction error numbers */ +enum vmx_insn_errno +{ + VMX_INSN_SUCCEED = 0, + VMX_INSN_VMCLEAR_INVALID_PHYADDR = 2, + VMX_INSN_VMCLEAR_WITH_VMXON_PTR = 3, + VMX_INSN_VMLAUNCH_NONCLEAR_VMCS = 4, + VMX_INSN_VMRESUME_NONLAUNCHED_VMCS = 5, + VMX_INSN_INVALID_CONTROL_STATE = 7, + VMX_INSN_INVALID_HOST_STATE = 8, + VMX_INSN_VMPTRLD_INVALID_PHYADDR = 9, + VMX_INSN_VMPTRLD_WITH_VMXON_PTR = 10, + VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID = 11, + VMX_INSN_UNSUPPORTED_VMCS_COMPONENT = 12, + VMX_INSN_VMXON_IN_VMX_ROOT = 15, + VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS = 26, + VMX_INSN_INVEPT_INVVPID_INVALID_OP = 28, + VMX_INSN_FAIL_INVALID = ~0, +}; + +/* MSR load/save list infrastructure. */ +enum vmx_msr_list_type { + VMX_MSR_HOST, /* MSRs loaded on VMExit. */ + VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. 
*/ + VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only. */ +}; + +/** + * Add an MSR to an MSR list (inserting space for the entry if necessary), and + * set the MSRs value. + * + * It is undefined behaviour to try and insert the same MSR into both the + * GUEST and GUEST_LOADONLY list. + * + * May fail if unable to allocate memory for the list, or the total number of + * entries exceeds the memory allocated. + */ +int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, + enum vmx_msr_list_type type); + +/** + * Remove an MSR entry from an MSR list. Returns -ESRCH if the MSR was not + * found in the list. + */ +int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type); + +static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val) +{ + return vmx_add_msr(v, msr, val, VMX_MSR_GUEST); +} +static inline int vmx_add_host_load_msr(struct vcpu *v, uint32_t msr, + uint64_t val) +{ + return vmx_add_msr(v, msr, val, VMX_MSR_HOST); +} + +struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr, + enum vmx_msr_list_type type); + +static inline int vmx_read_guest_msr(const struct vcpu *v, uint32_t msr, + uint64_t *val) +{ + const struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST); + + if ( !ent ) + { + *val = 0; + return -ESRCH; + } + + *val = ent->data; + + return 0; +} + +static inline int vmx_read_guest_loadonly_msr( + const struct vcpu *v, uint32_t msr, uint64_t *val) +{ + const struct vmx_msr_entry *ent = + vmx_find_msr(v, msr, VMX_MSR_GUEST_LOADONLY); + + if ( !ent ) + { + *val = 0; + return -ESRCH; + } + + *val = ent->data; + + return 0; +} + +static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr, + uint64_t val) +{ + struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST); + + if ( !ent ) + return -ESRCH; + + ent->data = val; + + return 0; +} + + +/* MSR intercept bitmap infrastructure. 
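+ *
+ * Each MSR in the ranges covered by struct vmx_msr_bitmap has separate
+ * read and write intercept bits; a clear bit lets the guest access the
+ * MSR without a VM exit. Illustrative use:
+ *
+ *     vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_RW);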
*/ +enum vmx_msr_intercept_type { + VMX_MSR_R = 1, + VMX_MSR_W = 2, + VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W, +}; + +void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, + enum vmx_msr_intercept_type type); +void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, + enum vmx_msr_intercept_type type); +void vmx_vmcs_switch(paddr_t from, paddr_t to); +void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector); +void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector); +bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap, + unsigned int msr, bool is_write) __nonnull(1); +void virtual_vmcs_enter(const struct vcpu *); +void virtual_vmcs_exit(const struct vcpu *); +u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding); +enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v, + u32 vmcs_encoding, u64 *val); +void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val); +enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v, + u32 vmcs_encoding, u64 val); + +DECLARE_PER_CPU(bool_t, vmxon); + +bool_t vmx_vcpu_pml_enabled(const struct vcpu *v); +int vmx_vcpu_enable_pml(struct vcpu *v); +void vmx_vcpu_disable_pml(struct vcpu *v); +void vmx_vcpu_flush_pml_buffer(struct vcpu *v); +bool_t vmx_domain_pml_enabled(const struct domain *d); +int vmx_domain_enable_pml(struct domain *d); +void vmx_domain_disable_pml(struct domain *d); +void vmx_domain_flush_pml_buffers(struct domain *d); + +void vmx_domain_update_eptp(struct domain *d); + +#endif /* ASM_X86_HVM_VMX_VMCS_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h new file mode 100644 index 0000000000..85530d2e0e --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h @@ -0,0 +1,692 @@ +/* + * vmx.h: VMX Architecture related definitions + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + * + */ +#ifndef __ASM_X86_HVM_VMX_VMX_H__ +#define __ASM_X86_HVM_VMX_VMX_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int8_t opt_ept_exec_sp; + +typedef union { + struct { + u64 r : 1, /* bit 0 - Read permission */ + w : 1, /* bit 1 - Write permission */ + x : 1, /* bit 2 - Execute permission */ + emt : 3, /* bits 5:3 - EPT Memory type */ + ipat : 1, /* bit 6 - Ignore PAT memory type */ + sp : 1, /* bit 7 - Is this a superpage? 
*/ + a : 1, /* bit 8 - Access bit */ + d : 1, /* bit 9 - Dirty bit */ + recalc : 1, /* bit 10 - Software available 1 */ + snp : 1, /* bit 11 - VT-d snoop control in shared + EPT/VT-d usage */ + mfn : 40, /* bits 51:12 - Machine physical frame number */ + sa_p2mt : 6, /* bits 57:52 - Software available 2 */ + access : 4, /* bits 61:58 - p2m_access_t */ + _rsvd : 1, /* bit 62 - reserved */ + suppress_ve : 1; /* bit 63 - suppress #VE */ + }; + u64 epte; +} ept_entry_t; + +typedef struct { + /*use lxe[0] to save result */ + ept_entry_t lxe[5]; +} ept_walk_t; + +typedef enum { + ept_access_n = 0, /* No access permissions allowed */ + ept_access_r = 1, /* Read only */ + ept_access_w = 2, /* Write only */ + ept_access_rw = 3, /* Read & Write */ + ept_access_x = 4, /* Exec Only */ + ept_access_rx = 5, /* Read & Exec */ + ept_access_wx = 6, /* Write & Exec*/ + ept_access_all = 7, /* Full permissions */ +} ept_access_t; + +#define EPT_TABLE_ORDER 9 +#define EPTE_SUPER_PAGE_MASK 0x80 +#define EPTE_MFN_MASK 0xffffffffff000ULL +#define EPTE_AVAIL1_MASK 0xF00 +#define EPTE_EMT_MASK 0x38 +#define EPTE_IGMT_MASK 0x40 +#define EPTE_AVAIL1_SHIFT 8 +#define EPTE_EMT_SHIFT 3 +#define EPTE_IGMT_SHIFT 6 +#define EPTE_RWX_MASK 0x7 +#define EPTE_FLAG_MASK 0x7f + +#define EPT_EMT_UC 0 +#define EPT_EMT_WC 1 +#define EPT_EMT_RSV0 2 +#define EPT_EMT_RSV1 3 +#define EPT_EMT_WT 4 +#define EPT_EMT_WP 5 +#define EPT_EMT_WB 6 +#define EPT_EMT_RSV2 7 + +#define PI_xAPIC_NDST_MASK 0xFF00 + +void vmx_asm_vmexit_handler(struct cpu_user_regs); +void vmx_intr_assist(void); +void noreturn vmx_do_resume(void); +void vmx_vlapic_msr_changed(struct vcpu *v); +struct hvm_emulate_ctxt; +void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt); +void vmx_realmode(struct cpu_user_regs *regs); +void vmx_update_debug_state(struct vcpu *v); +void vmx_update_exception_bitmap(struct vcpu *v); +void vmx_update_cpu_exec_control(struct vcpu *v); +void vmx_update_secondary_exec_control(struct vcpu *v); + +#define POSTED_INTR_ON 0 +#define POSTED_INTR_SN 1 +static inline int pi_test_and_set_pir(uint8_t vector, struct pi_desc *pi_desc) +{ + return test_and_set_bit(vector, pi_desc->pir); +} + +static inline int pi_test_pir(uint8_t vector, const struct pi_desc *pi_desc) +{ + return test_bit(vector, pi_desc->pir); +} + +static inline int pi_test_and_set_on(struct pi_desc *pi_desc) +{ + return test_and_set_bit(POSTED_INTR_ON, &pi_desc->control); +} + +static inline void pi_set_on(struct pi_desc *pi_desc) +{ + set_bit(POSTED_INTR_ON, &pi_desc->control); +} + +static inline int pi_test_and_clear_on(struct pi_desc *pi_desc) +{ + return test_and_clear_bit(POSTED_INTR_ON, &pi_desc->control); +} + +static inline int pi_test_on(struct pi_desc *pi_desc) +{ + return pi_desc->on; +} + +static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group) +{ + return xchg(&pi_desc->pir[group], 0); +} + +static inline int pi_test_sn(struct pi_desc *pi_desc) +{ + return pi_desc->sn; +} + +static inline void pi_set_sn(struct pi_desc *pi_desc) +{ + set_bit(POSTED_INTR_SN, &pi_desc->control); +} + +static inline void pi_clear_sn(struct pi_desc *pi_desc) +{ + clear_bit(POSTED_INTR_SN, &pi_desc->control); +} + +/* + * Exit Reasons + */ +#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 + +#define EXIT_REASON_EXCEPTION_NMI 0 +#define EXIT_REASON_EXTERNAL_INTERRUPT 1 +#define EXIT_REASON_TRIPLE_FAULT 2 +#define EXIT_REASON_INIT 3 +#define EXIT_REASON_SIPI 4 +#define EXIT_REASON_IO_SMI 5 +#define EXIT_REASON_OTHER_SMI 6 +#define 
EXIT_REASON_PENDING_VIRT_INTR 7 +#define EXIT_REASON_PENDING_VIRT_NMI 8 +#define EXIT_REASON_TASK_SWITCH 9 +#define EXIT_REASON_CPUID 10 +#define EXIT_REASON_GETSEC 11 +#define EXIT_REASON_HLT 12 +#define EXIT_REASON_INVD 13 +#define EXIT_REASON_INVLPG 14 +#define EXIT_REASON_RDPMC 15 +#define EXIT_REASON_RDTSC 16 +#define EXIT_REASON_RSM 17 +#define EXIT_REASON_VMCALL 18 +#define EXIT_REASON_VMCLEAR 19 +#define EXIT_REASON_VMLAUNCH 20 +#define EXIT_REASON_VMPTRLD 21 +#define EXIT_REASON_VMPTRST 22 +#define EXIT_REASON_VMREAD 23 +#define EXIT_REASON_VMRESUME 24 +#define EXIT_REASON_VMWRITE 25 +#define EXIT_REASON_VMXOFF 26 +#define EXIT_REASON_VMXON 27 +#define EXIT_REASON_CR_ACCESS 28 +#define EXIT_REASON_DR_ACCESS 29 +#define EXIT_REASON_IO_INSTRUCTION 30 +#define EXIT_REASON_MSR_READ 31 +#define EXIT_REASON_MSR_WRITE 32 +#define EXIT_REASON_INVALID_GUEST_STATE 33 +#define EXIT_REASON_MSR_LOADING 34 +#define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_TRAP_FLAG 37 +#define EXIT_REASON_MONITOR_INSTRUCTION 39 +#define EXIT_REASON_PAUSE_INSTRUCTION 40 +#define EXIT_REASON_MCE_DURING_VMENTRY 41 +#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 +#define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EOI_INDUCED 45 +#define EXIT_REASON_ACCESS_GDTR_OR_IDTR 46 +#define EXIT_REASON_ACCESS_LDTR_OR_TR 47 +#define EXIT_REASON_EPT_VIOLATION 48 +#define EXIT_REASON_EPT_MISCONFIG 49 +#define EXIT_REASON_INVEPT 50 +#define EXIT_REASON_RDTSCP 51 +#define EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 52 +#define EXIT_REASON_INVVPID 53 +#define EXIT_REASON_WBINVD 54 +#define EXIT_REASON_XSETBV 55 +#define EXIT_REASON_APIC_WRITE 56 +#define EXIT_REASON_INVPCID 58 +#define EXIT_REASON_VMFUNC 59 +#define EXIT_REASON_PML_FULL 62 +#define EXIT_REASON_XSAVES 63 +#define EXIT_REASON_XRSTORS 64 + +/* + * Interruption-information format + */ +#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ +#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ +#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ +#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000 /* 12 */ +#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ +#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 + +/* + * Exit Qualifications for MOV for Control Register Access + */ +enum { + VMX_CR_ACCESS_TYPE_MOV_TO_CR, + VMX_CR_ACCESS_TYPE_MOV_FROM_CR, + VMX_CR_ACCESS_TYPE_CLTS, + VMX_CR_ACCESS_TYPE_LMSW, +}; +typedef union cr_access_qual { + unsigned long raw; + struct { + uint16_t cr:4, + access_type:2, /* VMX_CR_ACCESS_TYPE_* */ + lmsw_op_type:1, /* 0 => reg, 1 => mem */ + :1, + gpr:4, + :4; + uint16_t lmsw_data; + uint32_t :32; + }; +} __transparent__ cr_access_qual_t; + +/* + * Access Rights + */ +#define X86_SEG_AR_SEG_TYPE 0xf /* 3:0, segment type */ +#define X86_SEG_AR_DESC_TYPE (1u << 4) /* 4, descriptor type */ +#define X86_SEG_AR_DPL 0x60 /* 6:5, descriptor privilege level */ +#define X86_SEG_AR_SEG_PRESENT (1u << 7) /* 7, segment present */ +#define X86_SEG_AR_AVL (1u << 12) /* 12, available for system software */ +#define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */ +#define X86_SEG_AR_DEF_OP_SIZE (1u << 14) /* 14, default operation size */ +#define X86_SEG_AR_GRANULARITY (1u << 15) /* 15, granularity */ +#define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */ + +#define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n" +#define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */ +#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n" +#define VMPTRLD_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /6 */ +#define VMPTRST_OPCODE ".byte 
0x0f,0xc7\n" /* reg/opcode: /7 */ +#define VMREAD_OPCODE ".byte 0x0f,0x78\n" +#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n" +#define VMWRITE_OPCODE ".byte 0x0f,0x79\n" +#define INVEPT_OPCODE ".byte 0x66,0x0f,0x38,0x80\n" /* m128,r64/32 */ +#define INVVPID_OPCODE ".byte 0x66,0x0f,0x38,0x81\n" /* m128,r64/32 */ +#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4\n" +#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7\n" + +#define MODRM_EAX_08 ".byte 0x08\n" /* ECX, [EAX] */ +#define MODRM_EAX_06 ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */ +#define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */ +#define MODRM_EAX_ECX ".byte 0xc1\n" /* EAX, ECX */ + +extern uint8_t posted_intr_vector; + +#define cpu_has_vmx_ept_exec_only_supported \ + (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED) + +#define cpu_has_vmx_ept_wl4_supported \ + (vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) +#define cpu_has_vmx_ept_mt_uc (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_UC) +#define cpu_has_vmx_ept_mt_wb (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) +#define cpu_has_vmx_ept_2mb (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB) +#define cpu_has_vmx_ept_1gb (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB) +#define cpu_has_vmx_ept_ad (vmx_ept_vpid_cap & VMX_EPT_AD_BIT) +#define cpu_has_vmx_ept_invept_single_context \ + (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT) + +#define EPT_2MB_SHIFT 16 +#define EPT_1GB_SHIFT 17 +#define ept_has_2mb(c) ((c >> EPT_2MB_SHIFT) & 1) +#define ept_has_1gb(c) ((c >> EPT_1GB_SHIFT) & 1) + +#define INVEPT_SINGLE_CONTEXT 1 +#define INVEPT_ALL_CONTEXT 2 + +#define cpu_has_vmx_vpid_invvpid_individual_addr \ + (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR) +#define cpu_has_vmx_vpid_invvpid_single_context \ + (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT) +#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \ + (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL) + +#define INVVPID_INDIVIDUAL_ADDR 0 +#define INVVPID_SINGLE_CONTEXT 1 +#define INVVPID_ALL_CONTEXT 2 +#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3 + +#ifdef HAVE_AS_VMX +# define GAS_VMX_OP(yes, no) yes +#else +# define GAS_VMX_OP(yes, no) no +#endif + +static always_inline void __vmptrld(u64 addr) +{ + asm volatile ( +#ifdef HAVE_AS_VMX + "vmptrld %0\n" +#else + VMPTRLD_OPCODE MODRM_EAX_06 +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, vmptrld) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION + : +#ifdef HAVE_AS_VMX + : "m" (addr), +#else + : "a" (&addr), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + : "memory"); +} + +static always_inline void __vmpclear(u64 addr) +{ + asm volatile ( +#ifdef HAVE_AS_VMX + "vmclear %0\n" +#else + VMCLEAR_OPCODE MODRM_EAX_06 +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, vmclear) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION + : +#ifdef HAVE_AS_VMX + : "m" (addr), +#else + : "a" (&addr), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + : "memory"); +} + +static always_inline void __vmread(unsigned long field, unsigned long *value) +{ + asm volatile ( +#ifdef HAVE_AS_VMX + "vmread %1, %0\n\t" +#else + VMREAD_OPCODE MODRM_EAX_ECX +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, vmread) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION +#ifdef HAVE_AS_VMX + : "=rm" (*value) + : "r" (field), +#else + : "=c" (*value) + : "a" (field), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + ); +} + +static always_inline void __vmwrite(unsigned long field, unsigned long 
value) +{ + asm volatile ( +#ifdef HAVE_AS_VMX + "vmwrite %1, %0\n" +#else + VMWRITE_OPCODE MODRM_EAX_ECX +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, vmwrite) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION + : +#ifdef HAVE_AS_VMX + : "r" (field) , "rm" (value), +#else + : "a" (field) , "c" (value), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + ); +} + +static inline enum vmx_insn_errno vmread_safe(unsigned long field, + unsigned long *value) +{ + unsigned long ret = VMX_INSN_SUCCEED; + bool fail_invalid, fail_valid; + + asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t", + VMREAD_OPCODE MODRM_EAX_ECX) + ASM_FLAG_OUT(, "setc %[invalid]\n\t") + ASM_FLAG_OUT(, "setz %[valid]\n\t") + : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid), + ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid), + [value] GAS_VMX_OP("=rm", "=c") (*value) + : [field] GAS_VMX_OP("r", "a") (field)); + + if ( unlikely(fail_invalid) ) + ret = VMX_INSN_FAIL_INVALID; + else if ( unlikely(fail_valid) ) + __vmread(VM_INSTRUCTION_ERROR, &ret); + + return ret; +} + +static inline enum vmx_insn_errno vmwrite_safe(unsigned long field, + unsigned long value) +{ + unsigned long ret = VMX_INSN_SUCCEED; + bool fail_invalid, fail_valid; + + asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t", + VMWRITE_OPCODE MODRM_EAX_ECX) + ASM_FLAG_OUT(, "setc %[invalid]\n\t") + ASM_FLAG_OUT(, "setz %[valid]\n\t") + : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid), + ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid) + : [field] GAS_VMX_OP("r", "a") (field), + [value] GAS_VMX_OP("rm", "c") (value)); + + if ( unlikely(fail_invalid) ) + ret = VMX_INSN_FAIL_INVALID; + else if ( unlikely(fail_valid) ) + __vmread(VM_INSTRUCTION_ERROR, &ret); + + return ret; +} + +static always_inline void __invept(unsigned long type, uint64_t eptp) +{ + struct { + uint64_t eptp, rsvd; + } operand = { eptp }; + + /* + * If single context invalidation is not supported, we escalate to + * use all context invalidation. + */ + if ( (type == INVEPT_SINGLE_CONTEXT) && + !cpu_has_vmx_ept_invept_single_context ) + type = INVEPT_ALL_CONTEXT; + + asm volatile ( +#ifdef HAVE_AS_EPT + "invept %0, %1\n" +#else + INVEPT_OPCODE MODRM_EAX_08 +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, invept) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION + : +#ifdef HAVE_AS_EPT + : "m" (operand), "r" (type), +#else + : "a" (&operand), "c" (type), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + : "memory" ); +} + +static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva) +{ + struct __packed { + u64 vpid:16; + u64 rsvd:48; + u64 gva; + } operand = {vpid, 0, gva}; + + /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. 
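+ * The 1:/2: labels and the _ASM_EXTABLE(1b, 2b) entry below implement the
+ * fixup: if INVVPID faults because this CPU has not yet entered VMX
+ * operation, execution resumes at label 2 and the flush is harmlessly
+ * skipped.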
*/ + asm volatile ( "1: " +#ifdef HAVE_AS_EPT + "invvpid %0, %1\n" +#else + INVVPID_OPCODE MODRM_EAX_08 +#endif + /* CF==1 or ZF==1 --> BUG() */ + UNLIKELY_START(be, invvpid) + _ASM_BUGFRAME_TEXT(0) + UNLIKELY_END_SECTION "\n" + "2:" + _ASM_EXTABLE(1b, 2b) + : +#ifdef HAVE_AS_EPT + : "m" (operand), "r" (type), +#else + : "a" (&operand), "c" (type), +#endif + _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) + : "memory" ); +} + +static inline void ept_sync_all(void) +{ + __invept(INVEPT_ALL_CONTEXT, 0); +} + +void ept_sync_domain(struct p2m_domain *p2m); + +static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva) +{ + int type = INVVPID_INDIVIDUAL_ADDR; + + /* + * If individual address invalidation is not supported, we escalate to + * use single context invalidation. + */ + if ( likely(cpu_has_vmx_vpid_invvpid_individual_addr) ) + goto execute_invvpid; + + type = INVVPID_SINGLE_CONTEXT; + + /* + * If single context invalidation is not supported, we escalate to + * use all context invalidation. + */ + if ( !cpu_has_vmx_vpid_invvpid_single_context ) + type = INVVPID_ALL_CONTEXT; + +execute_invvpid: + __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva); +} + +static inline void vpid_sync_all(void) +{ + __invvpid(INVVPID_ALL_CONTEXT, 0, 0); +} + +static inline void __vmxoff(void) +{ + asm volatile ( + VMXOFF_OPCODE + : : : "memory" ); +} + +static inline int __vmxon(u64 addr) +{ + int rc; + + asm volatile ( + "1: " VMXON_OPCODE MODRM_EAX_06 "\n" + " setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */ + "2:\n" + ".section .fixup,\"ax\"\n" + "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */ + ".previous\n" + _ASM_EXTABLE(1b, 3b) + : "=q" (rc) + : "0" (0), "a" (&addr) + : "memory"); + + return rc; +} + +int vmx_guest_x86_mode(struct vcpu *v); +unsigned int vmx_get_cpl(void); + +void vmx_inject_extint(int trap, uint8_t source); +void vmx_inject_nmi(void); + +int ept_p2m_init(struct p2m_domain *p2m); +void ept_p2m_uninit(struct p2m_domain *p2m); + +void ept_walk_table(struct domain *d, unsigned long gfn); +bool_t ept_handle_misconfig(uint64_t gpa); +int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int order, bool *ipat, p2m_type_t type); +void setup_ept_dump(void); +void p2m_init_altp2m_ept(struct domain *d, unsigned int i); +/* Locate an alternate p2m by its EPTP */ +unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp); + +void update_guest_eip(void); + +void vmx_pi_per_cpu_init(unsigned int cpu); +void vmx_pi_desc_fixup(unsigned int cpu); + +void vmx_sync_exit_bitmap(struct vcpu *v); + +#ifdef CONFIG_HVM +void vmx_pi_hooks_assign(struct domain *d); +void vmx_pi_hooks_deassign(struct domain *d); +#else +static inline void vmx_pi_hooks_assign(struct domain *d) {} +static inline void vmx_pi_hooks_deassign(struct domain *d) {} +#endif + +#define APIC_INVALID_DEST 0xffffffff + +/* EPT violation qualifications definitions */ +typedef union ept_qual { + unsigned long raw; + struct { + bool read:1, write:1, fetch:1, + eff_read:1, eff_write:1, eff_exec:1, /* eff_user_exec */:1, + gla_valid:1, + gla_fault:1; /* Valid iff gla_valid. 
*/ + unsigned long /* pad */:55; + }; +} __transparent__ ept_qual_t; + +#define EPT_L4_PAGETABLE_SHIFT 39 +#define EPT_PAGETABLE_ENTRIES 512 + +/* #VE information page */ +typedef struct { + u32 exit_reason; + u32 semaphore; + u64 exit_qualification; + u64 gla; + u64 gpa; + u16 eptp_index; +} ve_info_t; + +/* VM-Exit instruction info for LIDT, LGDT, SIDT, SGDT */ +typedef union idt_or_gdt_instr_info { + unsigned long raw; + struct { + unsigned long scaling :2, /* bits 0:1 - Scaling */ + :5, /* bits 6:2 - Undefined */ + addr_size :3, /* bits 9:7 - Address size */ + :1, /* bit 10 - Cleared to 0 */ + operand_size :1, /* bit 11 - Operand size */ + :3, /* bits 14:12 - Undefined */ + segment_reg :3, /* bits 17:15 - Segment register */ + index_reg :4, /* bits 21:18 - Index register */ + index_reg_invalid :1, /* bit 22 - Index register invalid */ + base_reg :4, /* bits 26:23 - Base register */ + base_reg_invalid :1, /* bit 27 - Base register invalid */ + instr_identity :1, /* bit 28 - 0:GDT, 1:IDT */ + instr_write :1, /* bit 29 - 0:store, 1:load */ + :34; /* bits 30:63 - Undefined */ + }; +} idt_or_gdt_instr_info_t; + +/* VM-Exit instruction info for LLDT, LTR, SLDT, STR */ +typedef union ldt_or_tr_instr_info { + unsigned long raw; + struct { + unsigned long scaling :2, /* bits 0:1 - Scaling */ + :1, /* bit 2 - Undefined */ + reg1 :4, /* bits 6:3 - Reg1 */ + addr_size :3, /* bits 9:7 - Address size */ + mem_reg :1, /* bit 10 - Mem/Reg */ + :4, /* bits 14:11 - Undefined */ + segment_reg :3, /* bits 17:15 - Segment register */ + index_reg :4, /* bits 21:18 - Index register */ + index_reg_invalid :1, /* bit 22 - Index register invalid */ + base_reg :4, /* bits 26:23 - Base register */ + base_reg_invalid :1, /* bit 27 - Base register invalid */ + instr_identity :1, /* bit 28 - 0:LDT, 1:TR */ + instr_write :1, /* bit 29 - 0:store, 1:load */ + :34; /* bits 30:63 - Undefined */ + }; +} ldt_or_tr_instr_info_t; + +#endif /* __ASM_X86_HVM_VMX_VMX_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/vmx/vvmx.h b/xen/arch/x86/include/asm/hvm/vmx/vvmx.h new file mode 100644 index 0000000000..d5f68f30b1 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vmx/vvmx.h @@ -0,0 +1,214 @@ + +/* + * vvmx.h: Support virtual VMX for nested virtualization. + * + * Copyright (c) 2010, Intel Corporation. + * Author: Qing He + * Eddie Dong + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + * + */ +#ifndef __ASM_X86_HVM_VVMX_H__ +#define __ASM_X86_HVM_VVMX_H__ + +struct vvmcs_list { + unsigned long vvmcs_mfn; + struct list_head node; +}; + +struct nestedvmx { + /* + * vmxon_region_pa is also used to indicate whether a vcpu is in + * VMX operation. When a vcpu is out of VMX operation, its + * vmxon_region_pa is set to an invalid address INVALID_PADDR. We + * cannot use 0 for this purpose, because it's a valid VMXON region + * address. 
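+ * An "is this vcpu in VMX operation?" check therefore reduces to the
+ * (illustrative) expression:
+ *     vcpu_2_nvmx(v).vmxon_region_pa != INVALID_PADDR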
+ */ + paddr_t vmxon_region_pa; + void *iobitmap[2]; /* map (va) of L1 guest I/O bitmap */ + struct vmx_msr_bitmap *msrbitmap; /* map (va) of L1 guest MSR bitmap */ + struct vmx_msr_bitmap *msr_merged; /* merged L1 and L2 MSR bitmap */ + /* deferred nested interrupt */ + struct { + unsigned long intr_info; + u32 error_code; + u8 source; + } intr; + struct { + bool_t enabled; + uint32_t exit_reason; + uint32_t exit_qual; + } ept; + uint32_t guest_vpid; + struct list_head launched_list; +}; + +#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx) + +/* bits 1, 2, 4 must be 1 */ +#define VMX_PINBASED_CTLS_DEFAULT1 0x16 +/* bits 1, 4-6, 8, 13-16, 26 must be 1 */ +#define VMX_PROCBASED_CTLS_DEFAULT1 0x401e172 +/* bits 0-8, 10, 11, 13, 14, 16, 17 must be 1 */ +#define VMX_EXIT_CTLS_DEFAULT1 0x36dff +/* bits 0-8 and 12 must be 1 */ +#define VMX_ENTRY_CTLS_DEFAULT1 0x11ff + + +union vmx_inst_info { + struct { + unsigned int scaling :2; /* bits 0-1 */ + unsigned int __rsvd0 :1; /* bit 2 */ + unsigned int reg1 :4; /* bits 3-6 */ + unsigned int addr_size :3; /* bits 7-9 */ + unsigned int memreg :1; /* bit 10 */ + unsigned int __rsvd1 :4; /* bits 11-14 */ + unsigned int segment :3; /* bits 15-17 */ + unsigned int index_reg :4; /* bits 18-21 */ + unsigned int index_reg_invalid :1; /* bit 22 */ + unsigned int base_reg :4; /* bits 23-26 */ + unsigned int base_reg_invalid :1; /* bit 27 */ + unsigned int reg2 :4; /* bits 28-31 */ + } fields; + u32 word; +}; + +int nvmx_vcpu_initialise(struct vcpu *v); +void nvmx_vcpu_destroy(struct vcpu *v); +int nvmx_vcpu_reset(struct vcpu *v); +uint64_t nvmx_vcpu_eptp_base(struct vcpu *v); +enum hvm_intblk nvmx_intr_blocked(struct vcpu *v); +bool_t nvmx_intercepts_exception( + struct vcpu *v, unsigned int vector, int error_code); +void nvmx_domain_relinquish_resources(struct domain *d); + +bool_t nvmx_ept_enabled(struct vcpu *v); + +#define EPT_TRANSLATE_SUCCEED 0 +#define EPT_TRANSLATE_VIOLATION 1 +#define EPT_TRANSLATE_MISCONFIG 2 +#define EPT_TRANSLATE_RETRY 3 + +int +nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, + unsigned int *page_order, uint8_t *p2m_acc, + bool_t access_r, bool_t access_w, bool_t access_x); +/* + * Virtual VMCS layout + * + * Since the physical VMCS layout is unknown, a custom layout is used + * for the virtual VMCS seen by the guest. It occupies a 4k page, and each + * field is located by a 9-bit offset into u64[]. The offset is laid out as + * follows, which means every <width, type> pair has a max of 32 + * fields available. + * + * 9 7 5 0 + * -------------------------------- + * offset: | width | type | index | + * -------------------------------- + * + * Also, since the lower range <width=0, type=0> has only one + * field: VPID, it is moved to a higher offset (63), leaving the + * lower range to non-indexed fields like the VMCS revision. 
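+ * As an illustrative sketch of the scheme (not the authoritative code),
+ * a field value could be fetched from the 4k page as:
+ *     offset = (width << 7) | (type << 5) | (index & 0x1f);
+ *     if ( offset == 0 )          /* VPID, relocated as described */
+ *         offset = 0x3f;
+ *     value = ((const uint64_t *)vvmcs)[offset];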
+ * + */ + +struct vvmcs_header { + u32 revision; + u32 abort; +}; + +union vmcs_encoding { + struct { + u32 access_type : 1; + u32 index : 9; + u32 type : 2; + u32 rsv1 : 1; + u32 width : 2; + u32 rsv2 : 17; + }; + u32 word; +}; + +enum vvmcs_encoding_width { + VVMCS_WIDTH_16 = 0, + VVMCS_WIDTH_64, + VVMCS_WIDTH_32, + VVMCS_WIDTH_NATURAL, +}; + +enum vvmcs_encoding_type { + VVMCS_TYPE_CONTROL = 0, + VVMCS_TYPE_RO, + VVMCS_TYPE_GSTATE, + VVMCS_TYPE_HSTATE, +}; + +u64 get_vvmcs_virtual(void *vvmcs, u32 encoding); +u64 get_vvmcs_real(const struct vcpu *, u32 encoding); +void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val); +void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val); +enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val); +enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding, + u64 *val); +enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val); +enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding, + u64 val); + +#define get_vvmcs(vcpu, encoding) \ + (cpu_has_vmx_vmcs_shadowing ? \ + get_vvmcs_real(vcpu, encoding) : \ + get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding)) + +#define set_vvmcs(vcpu, encoding, val) \ + (cpu_has_vmx_vmcs_shadowing ? \ + set_vvmcs_real(vcpu, encoding, val) : \ + set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) + +#define get_vvmcs_safe(vcpu, encoding, val) \ + (cpu_has_vmx_vmcs_shadowing ? \ + get_vvmcs_real_safe(vcpu, encoding, val) : \ + get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) + +#define set_vvmcs_safe(vcpu, encoding, val) \ + (cpu_has_vmx_vmcs_shadowing ? \ + set_vvmcs_real_safe(vcpu, encoding, val) : \ + set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) + +void nvmx_destroy_vmcs(struct vcpu *v); +int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason); +int nvmx_msr_read_intercept(unsigned int msr, + u64 *msr_content); + +void nvmx_update_exec_control(struct vcpu *v, u32 value); +void nvmx_update_secondary_exec_control(struct vcpu *v, + unsigned long value); +void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value); +void nvmx_switch_guest(void); +void nvmx_idtv_handling(void); +u64 nvmx_get_tsc_offset(struct vcpu *v); +int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, + unsigned int exit_reason); +void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr); + +uint64_t nept_get_ept_vpid_cap(void); + +int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, + unsigned int *page_order, uint32_t rwx_acc, + unsigned long *l1gfn, uint8_t *p2m_acc, + uint64_t *exit_qual, uint32_t *exit_reason); +int nvmx_cpu_up_prepare(unsigned int cpu); +void nvmx_cpu_dead(unsigned int cpu); +#endif /* __ASM_X86_HVM_VVMX_H__ */ + diff --git a/xen/arch/x86/include/asm/hvm/vpic.h b/xen/arch/x86/include/asm/hvm/vpic.h new file mode 100644 index 0000000000..d71b270193 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vpic.h @@ -0,0 +1,40 @@ +/* + * i8259 interrupt controller emulation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005 Intel Corp + * Copyright (c) 2006 Keir Fraser, XenSource Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __ASM_X86_HVM_VPIC_H__ +#define __ASM_X86_HVM_VPIC_H__ + +struct domain; +struct vcpu; + +void vpic_irq_positive_edge(struct domain *d, int irq); +void vpic_irq_negative_edge(struct domain *d, int irq); +void vpic_init(struct domain *d); +void vpic_reset(struct domain *d); +int vpic_ack_pending_irq(struct vcpu *v); +int is_periodic_irq(struct vcpu *v, int irq, int type); + +#endif /* __ASM_X86_HVM_VPIC_H__ */ diff --git a/xen/arch/x86/include/asm/hvm/vpt.h b/xen/arch/x86/include/asm/hvm/vpt.h new file mode 100644 index 0000000000..74c0cedd11 --- /dev/null +++ b/xen/arch/x86/include/asm/hvm/vpt.h @@ -0,0 +1,205 @@ +/* + * vpt.h: Virtual Platform Timer definitions + * + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_X86_HVM_VPT_H__ +#define __ASM_X86_HVM_VPT_H__ + +#include +#include +#include +#include + +/* + * Abstract layer of periodic time, covering both periodic and one-shot + * timers. 
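+ * (A periodic_time with one_shot set fires once and is then dropped from
+ * its list; otherwise it re-arms every 'period' ns, accumulating any
+ * missed ticks in pending_intr_nr for later injection.)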
+ */ +typedef void time_cb(struct vcpu *v, void *opaque); + +struct periodic_time { + struct list_head list; + bool on_list; + bool one_shot; + bool do_not_freeze; + bool irq_issued; + bool warned_timeout_too_short; + bool level; +#define PTSRC_isa 1 /* ISA time source */ +#define PTSRC_lapic 2 /* LAPIC time source */ +#define PTSRC_ioapic 3 /* IOAPIC time source */ + u8 source; /* PTSRC_ */ + u8 irq; + struct vcpu *vcpu; /* vcpu timer interrupt delivers to */ + u32 pending_intr_nr; /* pending timer interrupts */ + u64 period; /* period in ns */ + s_time_t scheduled; /* scheduled timer interrupt */ + u64 last_plt_gtime; /* platform time when last IRQ is injected */ + struct timer timer; /* ac_timer */ + time_cb *cb; + void *priv; /* point back to platform time source */ +}; + + +#define PIT_FREQ 1193182 +#define PIT_BASE 0x40 + +typedef struct PITState { + /* Hardware state */ + struct hvm_hw_pit hw; + /* Last time the counters read zero, for calculating counter reads */ + int64_t count_load_time[3]; + /* Channel 0 IRQ handling. */ + struct periodic_time pt0; + spinlock_t lock; +} PITState; + +struct hpet_registers { + /* Memory-mapped, software visible registers */ + uint64_t capability; /* capabilities */ + uint64_t config; /* configuration */ + uint64_t isr; /* interrupt status reg */ + uint64_t mc64; /* main counter */ + struct { /* timers */ + uint64_t config; /* configuration/cap */ + uint64_t cmp; /* comparator */ + uint64_t fsb; /* FSB route, not supported now */ + } timers[HPET_TIMER_NUM]; + + /* Hidden register state */ + uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ + uint64_t comparator64[HPET_TIMER_NUM]; /* 64 bit running comparator */ +}; + +typedef struct HPETState { + struct hpet_registers hpet; + uint64_t stime_freq; + uint64_t hpet_to_ns_scale; /* hpet ticks to ns (multiplied by 2^10) */ + uint64_t hpet_to_ns_limit; /* max hpet ticks convertible to ns */ + uint64_t mc_offset; + struct periodic_time pt[HPET_TIMER_NUM]; + rwlock_t lock; +} HPETState; + +typedef struct RTCState { + /* Hardware state */ + struct hvm_hw_rtc hw; + /* RTC's idea of the current time */ + struct tm current_tm; + /* update-ended timer */ + struct timer update_timer; + struct timer update_timer2; + uint64_t next_update_time; + /* alarm timer */ + struct timer alarm_timer; + /* periodic timer */ + struct periodic_time pt; + s_time_t start_time; + s_time_t check_ticks_since; + int period; + uint8_t pt_dead_ticks; + uint32_t use_timer; + spinlock_t lock; +} RTCState; + +#define FREQUENCE_PMTIMER 3579545 /* Timer should run at 3.579545 MHz */ +typedef struct PMTState { + struct vcpu *vcpu; /* Keeps sync with this vcpu's guest-time */ + uint64_t last_gtime; /* Last (guest) time we updated the timer */ + uint32_t not_accounted; /* time not accounted at last update */ + uint64_t scale; /* Multiplier to get from tsc to timer ticks */ + struct timer timer; /* To make sure we send SCIs */ + spinlock_t lock; +} PMTState; + +struct pl_time { /* platform time */ + struct RTCState vrtc; + struct HPETState vhpet; + struct PMTState vpmt; + /* + * Functions which want to modify the vcpu field of the vpt need + * to hold the global lock (pt_migrate) in write mode together + * with the per-vcpu locks of the lists being modified. Functions + * that want to lock a periodic_timer that's possibly on a + * different vCPU list need to take the lock in read mode first in + * order to prevent the vcpu field of periodic_timer from + * changing. 
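+ * Sketched writer-side usage, with a hypothetical per-vcpu lock name:
+ *     write_lock(&v->domain->arch.hvm.pl_time->pt_migrate);
+ *     spin_lock(&v->arch.hvm.tm_lock);     /* per-vcpu list lock */
+ *     /* ... update pt->vcpu, move pt between lists ... */
+ *     spin_unlock(&v->arch.hvm.tm_lock);
+ *     write_unlock(&v->domain->arch.hvm.pl_time->pt_migrate);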
+ * + * Note that two vcpu locks cannot be held at the same time to + * avoid a deadlock. + */ + rwlock_t pt_migrate; + /* guest_time = Xen sys time + stime_offset */ + int64_t stime_offset; + /* Ensures monotonicity in appropriate timer modes. */ + uint64_t last_guest_time; + spinlock_t pl_time_lock; + struct domain *domain; +}; + +void pt_save_timer(struct vcpu *v); +void pt_restore_timer(struct vcpu *v); +int pt_update_irq(struct vcpu *v); +struct hvm_intack; +void pt_intr_post(struct vcpu *v, struct hvm_intack intack); +void pt_migrate(struct vcpu *v); + +void pt_adjust_global_vcpu_target(struct vcpu *v); +#define pt_global_vcpu_target(d) \ + (is_hvm_domain(d) && (d)->arch.hvm.i8259_target ? \ + (d)->arch.hvm.i8259_target : \ + (d)->vcpu ? (d)->vcpu[0] : NULL) + +void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt); + +/* Is given periodic timer active? */ +#define pt_active(pt) ((pt)->on_list || (pt)->pending_intr_nr) + +/* + * Create/destroy a periodic (or one-shot!) timer. + * The given periodic timer structure must be initialised with zero bytes, + * except for the 'source' field which must be initialised with the + * correct PTSRC_ value. The initialised timer structure can then be passed + * to {create,destroy}_periodic_time() any number of times and in any order. + * Note that, for a given periodic timer, invocations of these functions MUST + * be serialised. + */ +void create_periodic_time( + struct vcpu *v, struct periodic_time *pt, uint64_t delta, + uint64_t period, uint8_t irq, time_cb *cb, void *data, bool level); +void destroy_periodic_time(struct periodic_time *pt); + +int pv_pit_handler(int port, int data, int write); +void pit_reset(struct domain *d); + +void pit_init(struct domain *d, unsigned long cpu_khz); +void pit_stop_channel0_irq(PITState * pit); +void pit_deinit(struct domain *d); +void rtc_init(struct domain *d); +void rtc_migrate_timers(struct vcpu *v); +void rtc_deinit(struct domain *d); +void rtc_reset(struct domain *d); +void rtc_update_clock(struct domain *d); + +void pmtimer_init(struct vcpu *v); +void pmtimer_deinit(struct domain *d); +void pmtimer_reset(struct domain *d); +int pmtimer_change_ioport(struct domain *d, uint64_t version); + +void hpet_init(struct domain *d); +void hpet_deinit(struct domain *d); +void hpet_reset(struct domain *d); + +#endif /* __ASM_X86_HVM_VPT_H__ */ diff --git a/xen/arch/x86/include/asm/hypercall.h b/xen/arch/x86/include/asm/hypercall.h new file mode 100644 index 0000000000..5d394d4923 --- /dev/null +++ b/xen/arch/x86/include/asm/hypercall.h @@ -0,0 +1,198 @@ +/****************************************************************************** + * asm-x86/hypercall.h + */ + +#ifndef __ASM_X86_HYPERCALL_H__ +#define __ASM_X86_HYPERCALL_H__ + +#include +#include +#include +#include /* for do_mca */ +#include + +typedef unsigned long hypercall_fn_t( + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long); + +typedef struct { + hypercall_fn_t *native; +#ifdef CONFIG_PV32 + hypercall_fn_t *compat; +#endif +} pv_hypercall_table_t; + +typedef struct { + uint8_t native; +#ifdef CONFIG_COMPAT + uint8_t compat; +#endif +} hypercall_args_t; + +extern const hypercall_args_t hypercall_args_table[NR_hypercalls]; + +#ifdef CONFIG_PV +extern const pv_hypercall_table_t pv_hypercall_table[]; +void pv_hypercall(struct cpu_user_regs *regs); +#endif + +void pv_ring1_init_hypercall_page(void *ptr); +void pv_ring3_init_hypercall_page(void *ptr); + +/* + * Both do_mmuext_op() and do_mmu_update(): + * We 
steal the m.s.b. of the @count parameter to indicate whether this + * invocation of do_mmu_update() is resuming a previously preempted call. + */ +#define MMU_UPDATE_PREEMPTED (~(~0U>>1)) + +extern long +do_event_channel_op_compat( + XEN_GUEST_HANDLE_PARAM(evtchn_op_t) uop); + +/* Legacy hypercall (as of 0x00030202). */ +extern long do_physdev_op_compat( + XEN_GUEST_HANDLE(physdev_op_t) uop); + +/* Legacy hypercall (as of 0x00030101). */ +extern long do_sched_op_compat( + int cmd, unsigned long arg); + +extern long +do_set_trap_table( + XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps); + +extern long +do_mmu_update( + XEN_GUEST_HANDLE_PARAM(mmu_update_t) ureqs, + unsigned int count, + XEN_GUEST_HANDLE_PARAM(uint) pdone, + unsigned int foreigndom); + +extern long +do_set_gdt( + XEN_GUEST_HANDLE_PARAM(xen_ulong_t) frame_list, + unsigned int entries); + +extern long +do_stack_switch( + unsigned long ss, + unsigned long esp); + +extern long +do_fpu_taskswitch( + int set); + +extern long +do_set_debugreg( + int reg, + unsigned long value); + +extern unsigned long +do_get_debugreg( + int reg); + +extern long +do_update_descriptor( + uint64_t gaddr, seg_desc_t desc); + +extern long +do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc); + +extern long +do_update_va_mapping( + unsigned long va, + u64 val64, + unsigned long flags); + +extern long +do_physdev_op( + int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); + +extern long +do_update_va_mapping_otherdomain( + unsigned long va, + u64 val64, + unsigned long flags, + domid_t domid); + +extern long +do_mmuext_op( + XEN_GUEST_HANDLE_PARAM(mmuext_op_t) uops, + unsigned int count, + XEN_GUEST_HANDLE_PARAM(uint) pdone, + unsigned int foreigndom); + +extern long do_callback_op( + int cmd, XEN_GUEST_HANDLE_PARAM(const_void) arg); + +extern unsigned long +do_iret( + void); + +extern long +do_set_callbacks( + unsigned long event_address, + unsigned long failsafe_address, + unsigned long syscall_address); + +extern long +do_set_segment_base( + unsigned int which, + unsigned long base); + +#ifdef CONFIG_COMPAT + +#include +#include + +extern int +compat_physdev_op( + int cmd, + XEN_GUEST_HANDLE_PARAM(void) arg); + +extern int +arch_compat_vcpu_op( + int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg); + +extern int compat_mmuext_op( + XEN_GUEST_HANDLE_PARAM(void) arg, + unsigned int count, + XEN_GUEST_HANDLE_PARAM(uint) pdone, + unsigned int foreigndom); + +extern int compat_platform_op( + XEN_GUEST_HANDLE_PARAM(void) u_xenpf_op); + +extern long compat_callback_op( + int cmd, XEN_GUEST_HANDLE(void) arg); + +extern int compat_update_va_mapping( + unsigned int va, u32 lo, u32 hi, unsigned int flags); + +extern int compat_update_va_mapping_otherdomain( + unsigned int va, u32 lo, u32 hi, unsigned int flags, domid_t domid); + +DEFINE_XEN_GUEST_HANDLE(trap_info_compat_t); +extern int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps); + +extern int compat_set_gdt( + XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries); + +extern int compat_update_descriptor( + u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi); + +extern unsigned int compat_iret(void); + +extern int compat_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); + +extern long compat_set_callbacks( + unsigned long event_selector, unsigned long event_address, + unsigned long failsafe_selector, unsigned long failsafe_address); + +DEFINE_XEN_GUEST_HANDLE(physdev_op_compat_t); +extern int compat_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_compat_t) uop); + +#endif /* 
CONFIG_COMPAT */ + +#endif /* __ASM_X86_HYPERCALL_H__ */ diff --git a/xen/arch/x86/include/asm/i387.h b/xen/arch/x86/include/asm/i387.h new file mode 100644 index 0000000000..a783549db9 --- /dev/null +++ b/xen/arch/x86/include/asm/i387.h @@ -0,0 +1,40 @@ +/* + * include/asm-i386/i387.h + * + * Copyright (C) 1994 Linus Torvalds + * + * Pentium III FXSR, SSE support + * General FPU state handling cleanups + * Gareth Hughes , May 2000 + */ + +#ifndef __ASM_I386_I387_H +#define __ASM_I386_I387_H + +#include + +/* Byte offset of the stored word size within the FXSAVE area/portion. */ +#define FPU_WORD_SIZE_OFFSET 511 + +struct ix87_env { + uint16_t fcw, _res0; + uint16_t fsw, _res1; + uint16_t ftw, _res2; + uint32_t fip; + uint16_t fcs; + uint16_t fop; + uint32_t fdp; + uint16_t fds, _res6; +}; + +void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts); +void vcpu_restore_fpu_lazy(struct vcpu *v); +void vcpu_save_fpu(struct vcpu *v); +void save_fpu_enable(void); + +int vcpu_init_fpu(struct vcpu *v); +struct xsave_struct; +void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area, + const void *data, unsigned int fcw_default); +void vcpu_destroy_fpu(struct vcpu *v); +#endif /* __ASM_I386_I387_H */ diff --git a/xen/arch/x86/include/asm/init.h b/xen/arch/x86/include/asm/init.h new file mode 100644 index 0000000000..5295b35e63 --- /dev/null +++ b/xen/arch/x86/include/asm/init.h @@ -0,0 +1,4 @@ +#ifndef _XEN_ASM_INIT_H +#define _XEN_ASM_INIT_H + +#endif /* _XEN_ASM_INIT_H */ diff --git a/xen/arch/x86/include/asm/invpcid.h b/xen/arch/x86/include/asm/invpcid.h new file mode 100644 index 0000000000..bf5c30313a --- /dev/null +++ b/xen/arch/x86/include/asm/invpcid.h @@ -0,0 +1,67 @@ +#ifndef _ASM_X86_INVPCID_H_ +#define _ASM_X86_INVPCID_H_ + +#include + +extern bool use_invpcid; + +#define INVPCID_OPCODE ".byte 0x66, 0x0f, 0x38, 0x82\n" +#define MODRM_ECX_01 ".byte 0x01\n" + +static inline void invpcid(unsigned int pcid, unsigned long addr, + unsigned int type) +{ + struct { + uint64_t pcid:12; + uint64_t reserved:52; + uint64_t addr; + } desc = { .pcid = pcid, .addr = addr }; + + asm volatile ( +#ifdef HAVE_AS_INVPCID + "invpcid %[desc], %q[type]" + : /* No output */ + : [desc] "m" (desc), [type] "r" (type) +#else + INVPCID_OPCODE MODRM_ECX_01 + : /* No output */ + : "a" (type), "c" (&desc) +#endif + : "memory" ); +} + +/* Flush all mappings for a given PCID and addr, not including globals */ +static inline void invpcid_flush_one(unsigned int pcid, unsigned long addr) +{ + invpcid(pcid, addr, X86_INVPCID_INDIV_ADDR); +} + +/* Flush all mappings for a given PCID, not including globals */ +static inline void invpcid_flush_single_context(unsigned int pcid) +{ + invpcid(pcid, 0, X86_INVPCID_SINGLE_CTXT); +} + +/* Flush all mappings, including globals, for all PCIDs */ +static inline void invpcid_flush_all(void) +{ + invpcid(0, 0, X86_INVPCID_ALL_INCL_GLOBAL); +} + +/* Flush all mappings for all PCIDs, excluding globals */ +static inline void invpcid_flush_all_nonglobals(void) +{ + invpcid(0, 0, X86_INVPCID_ALL_NON_GLOBAL); +} + +#endif /* _ASM_X86_INVPCID_H_ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/io.h b/xen/arch/x86/include/asm/io.h new file mode 100644 index 0000000000..92b784a861 --- /dev/null +++ b/xen/arch/x86/include/asm/io.h @@ -0,0 +1,56 @@ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#include +#include + +#define readb(x) (*(volatile uint8_t 
*)(x)) +#define readw(x) (*(volatile uint16_t *)(x)) +#define readl(x) (*(volatile uint32_t *)(x)) +#define readq(x) (*(volatile uint64_t *)(x)) +#define writeb(d,x) (*(volatile uint8_t *)(x) = (d)) +#define writew(d,x) (*(volatile uint16_t *)(x) = (d)) +#define writel(d,x) (*(volatile uint32_t *)(x) = (d)) +#define writeq(d,x) (*(volatile uint64_t *)(x) = (d)) + +#define __OUT1(s,x) \ +static inline void out##s(unsigned x value, unsigned short port) { + +#define __OUT2(s,s1,s2) \ +__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" + +#define __OUT(s,s1,x) \ +__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ +__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port));} + +#define __IN1(s) \ +static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; + +#define __IN2(s,s1,s2) \ +__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" + +#define __IN(s,s1,i...) \ +__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ +__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } + +#define RETURN_TYPE unsigned char +__IN(b,"") +#undef RETURN_TYPE +#define RETURN_TYPE unsigned short +__IN(w,"") +#undef RETURN_TYPE +#define RETURN_TYPE unsigned int +__IN(l,"") +#undef RETURN_TYPE + +__OUT(b,"b",char) +__OUT(w,"w",short) +__OUT(l,,int) + +/* Function pointer used to handle platform specific I/O port emulation. */ +#define IOEMUL_QUIRK_STUB_BYTES 9 +struct cpu_user_regs; +extern unsigned int (*ioemul_handle_quirk)( + u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs); + +#endif diff --git a/xen/arch/x86/include/asm/io_apic.h b/xen/arch/x86/include/asm/io_apic.h new file mode 100644 index 0000000000..ef0878b09e --- /dev/null +++ b/xen/arch/x86/include/asm/io_apic.h @@ -0,0 +1,212 @@ +#ifndef __ASM_IO_APIC_H +#define __ASM_IO_APIC_H + +#include +#include +#include +#include +#include + +/* + * Intel IO-APIC support for SMP and UP systems. 
+ * + * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar + */ + +#define IO_APIC_BASE(idx) \ + ((volatile uint32_t *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + (idx)) \ + + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) + +#define IO_APIC_ID(idx) (mp_ioapics[idx].mpc_apicid) + +/* I/O Unit Redirection Table */ +#define IO_APIC_REDIR_VECTOR_MASK 0x000FF +#define IO_APIC_REDIR_DELIV_MODE_MASK 0x00700 +#define IO_APIC_REDIR_DEST_LOGICAL 0x00800 +#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000 +#define IO_APIC_REDIR_SEND_PENDING (1 << 12) +#define IO_APIC_REDIR_REMOTE_IRR (1 << 14) +#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) +#define IO_APIC_REDIR_MASKED (1 << 16) + +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + uint32_t raw; + struct { + unsigned int __reserved_2:14; + unsigned int LTS:1; + unsigned int delivery_type:1; + unsigned int __reserved_1:8; + unsigned int ID:8; + } bits; +}; + +union IO_APIC_reg_01 { + uint32_t raw; + struct { + unsigned int version:8; + unsigned int __reserved_2:7; + unsigned int PRQ:1; + unsigned int entries:8; + unsigned int __reserved_1:8; + } bits; +}; + +union IO_APIC_reg_02 { + uint32_t raw; + struct { + unsigned int __reserved_2:24; + unsigned int arbitration:4; + unsigned int __reserved_1:4; + } bits; +}; + +union IO_APIC_reg_03 { + uint32_t raw; + struct { + unsigned int boot_DT:1; + unsigned int __reserved_1:31; + } bits; +}; + +/* + * # of IO-APICs and # of IRQ routing registers + */ +extern int nr_ioapics; +extern int nr_ioapic_entries[MAX_IO_APICS]; + +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; + +struct IO_APIC_route_entry { + unsigned int vector:8; + unsigned int delivery_mode:3; /* + * 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + unsigned int dest_mode:1; /* 0: physical, 1: logical */ + unsigned int delivery_status:1; + unsigned int polarity:1; /* 0: low, 1: high */ + unsigned int irr:1; + unsigned int trigger:1; /* 0: edge, 1: level */ + unsigned int mask:1; /* 0: enabled, 1: disabled */ + unsigned int __reserved_2:15; + + union { + struct { + unsigned int __reserved_1:24; + unsigned int physical_dest:4; + unsigned int __reserved_2:4; + } physical; + + struct { + unsigned int __reserved_1:24; + unsigned int logical_dest:8; + } logical; + + /* used when Interrupt Remapping with EIM is enabled */ + unsigned int dest32; + } dest; +}; + +/* + * MP-BIOS irq configuration table structures: + */ + +/* I/O APIC entries */ +extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; + +/* Base GSI for this IO APIC */ +unsigned int io_apic_gsi_base(unsigned int apic); + +/* Only need to remap ioapic RTE (reg: 10~3Fh) */ +#define ioapic_reg_remapped(reg) (iommu_intremap && ((reg) >= 0x10)) + +static inline unsigned int __io_apic_read(unsigned int apic, unsigned int reg) +{ + volatile uint32_t *regs = IO_APIC_BASE(apic); + + regs[0] = reg; + return regs[4]; +} + +static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) +{ + if ( ioapic_reg_remapped(reg) ) + return iommu_read_apic_from_ire(apic, reg); + return __io_apic_read(apic, reg); +} + +static inline void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ + volatile uint32_t *regs = IO_APIC_BASE(apic); + + regs[0] = reg; + regs[4] = value; +} + +static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ + if ( ioapic_reg_remapped(reg) ) + return 
iommu_update_ire_from_apic(apic, reg, value); + __io_apic_write(apic, reg, value); +} + +/* + * Re-write a value: to be used for read-modify-write + * cycles where the read already set up the index register. + */ +static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) +{ + if ( ioapic_reg_remapped(reg) ) + return iommu_update_ire_from_apic(apic, reg, value); + *(IO_APIC_BASE(apic) + 4) = value; +} + +/* 1 if "noapic" boot option passed */ +extern bool skip_ioapic_setup; +extern bool ioapic_ack_new; +extern bool ioapic_ack_forced; + +extern int io_apic_get_unique_id (int ioapic, int apic_id); +extern int io_apic_get_version (int ioapic); +extern int io_apic_get_redir_entries (int ioapic); +extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); + +extern void ioapic_init(void); + +extern void ioapic_suspend(void); +extern void ioapic_resume(void); + +extern void dump_ioapic_irq_info(void); + +extern struct IO_APIC_route_entry __ioapic_read_entry( + unsigned int apic, unsigned int pin, bool raw); +void __ioapic_write_entry( + unsigned int apic, unsigned int pin, bool raw, + struct IO_APIC_route_entry); + +extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); +extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); +extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); +extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); +extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries, + bool raw); + +unsigned highest_gsi(void); + +int ioapic_guest_read( unsigned long physbase, unsigned int reg, u32 *pval); +int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 pval); + +#endif diff --git a/xen/arch/x86/include/asm/iocap.h b/xen/arch/x86/include/asm/iocap.h new file mode 100644 index 0000000000..eee47228d4 --- /dev/null +++ b/xen/arch/x86/include/asm/iocap.h @@ -0,0 +1,21 @@ +/****************************************************************************** + * iocap.h + * + * Architecture-specific per-domain I/O capabilities. + */ + +#ifndef __X86_IOCAP_H__ +#define __X86_IOCAP_H__ + +#define ioports_permit_access(d, s, e) \ + rangeset_add_range((d)->arch.ioport_caps, s, e) +#define ioports_deny_access(d, s, e) \ + rangeset_remove_range((d)->arch.ioport_caps, s, e) +#define ioports_access_permitted(d, s, e) \ + rangeset_contains_range((d)->arch.ioport_caps, s, e) + +#define cache_flush_permitted(d) \ + (!rangeset_is_empty((d)->iomem_caps) || \ + !rangeset_is_empty((d)->arch.ioport_caps)) + +#endif /* __X86_IOCAP_H__ */ diff --git a/xen/arch/x86/include/asm/iommu.h b/xen/arch/x86/include/asm/iommu.h new file mode 100644 index 0000000000..de46149b40 --- /dev/null +++ b/xen/arch/x86/include/asm/iommu.h @@ -0,0 +1,155 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . 
+*/ +#ifndef __ARCH_X86_IOMMU_H__ +#define __ARCH_X86_IOMMU_H__ + +#include +#include +#include +#include +#include +#include +#include + +#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 + +struct g2m_ioport { + struct list_head list; + unsigned int gport; + unsigned int mport; + unsigned int np; +}; + +#define IOMMU_PAGE_SHIFT 12 +#define IOMMU_PAGE_SIZE (1 << IOMMU_PAGE_SHIFT) +#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1)) + +typedef uint64_t daddr_t; + +#define __dfn_to_daddr(dfn) ((daddr_t)(dfn) << IOMMU_PAGE_SHIFT) +#define __daddr_to_dfn(daddr) ((daddr) >> IOMMU_PAGE_SHIFT) + +#define dfn_to_daddr(dfn) __dfn_to_daddr(dfn_x(dfn)) +#define daddr_to_dfn(daddr) _dfn(__daddr_to_dfn(daddr)) + +struct arch_iommu +{ + spinlock_t mapping_lock; /* io page table lock */ + struct { + struct page_list_head list; + spinlock_t lock; + } pgtables; + + struct list_head identity_maps; + + union { + /* Intel VT-d */ + struct { + uint64_t pgd_maddr; /* io page directory machine address */ + unsigned int agaw; /* adjusted guest address width, 0 is level 2 30-bit */ + unsigned long *iommu_bitmap; /* bitmap of iommu(s) that the domain uses */ + } vtd; + /* AMD IOMMU */ + struct { + unsigned int paging_mode; + struct page_info *root_table; + struct guest_iommu *g_iommu; + } amd; + }; +}; + +extern struct iommu_ops iommu_ops; + +#ifdef NDEBUG +# include +# define iommu_call(ops, fn, args...) ({ \ + (void)(ops); \ + alternative_call(iommu_ops.fn, ## args); \ +}) + +# define iommu_vcall(ops, fn, args...) ({ \ + (void)(ops); \ + alternative_vcall(iommu_ops.fn, ## args); \ +}) +#endif + +static inline const struct iommu_ops *iommu_get_ops(void) +{ + BUG_ON(!iommu_ops.init); + return &iommu_ops; +} + +struct iommu_init_ops { + const struct iommu_ops *ops; + int (*setup)(void); + bool (*supports_x2apic)(void); +}; + +extern const struct iommu_init_ops *iommu_init_ops; + +void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value); +unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg); +int iommu_setup_hpet_msi(struct msi_desc *); + +static inline int iommu_adjust_irq_affinities(void) +{ + return iommu_ops.adjust_irq_affinities + ? iommu_ops.adjust_irq_affinities() + : 0; +} + +static inline bool iommu_supports_x2apic(void) +{ + return iommu_init_ops && iommu_init_ops->supports_x2apic + ? 
iommu_init_ops->supports_x2apic() + : false; +} + +int iommu_enable_x2apic(void); + +static inline void iommu_disable_x2apic(void) +{ + if ( x2apic_enabled && iommu_ops.disable_x2apic ) + iommu_ops.disable_x2apic(); +} + +int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma, + paddr_t base, paddr_t end, + unsigned int flag); +void iommu_identity_map_teardown(struct domain *d); + +extern bool untrusted_msi; + +int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq, + const uint8_t gvec); + +#define iommu_sync_cache(addr, size) ({ \ + const struct iommu_ops *ops = iommu_get_ops(); \ + \ + if ( ops->sync_cache ) \ + iommu_vcall(ops, sync_cache, addr, size); \ +}) + +int __must_check iommu_free_pgtables(struct domain *d); +struct page_info *__must_check iommu_alloc_pgtable(struct domain *d); + +#endif /* !__ARCH_X86_IOMMU_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/ioreq.h b/xen/arch/x86/include/asm/ioreq.h new file mode 100644 index 0000000000..d06ce9a6ea --- /dev/null +++ b/xen/arch/x86/include/asm/ioreq.h @@ -0,0 +1,39 @@ +/* + * ioreq.h: Hardware virtual machine assist interface definitions. + * + * This is a wrapper which purpose is to not include arch HVM specific header + * from the common code. + * + * Copyright (c) 2016 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_IOREQ_H__ +#define __ASM_X86_IOREQ_H__ + +#ifdef CONFIG_HVM +#include +#endif + +#endif /* __ASM_X86_IOREQ_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/irq.h b/xen/arch/x86/include/asm/irq.h new file mode 100644 index 0000000000..7c825e9d9c --- /dev/null +++ b/xen/arch/x86/include/asm/irq.h @@ -0,0 +1,221 @@ +#ifndef _ASM_HW_IRQ_H +#define _ASM_HW_IRQ_H + +/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */ + +#include +#include +#include +#include +#include +#include + +extern unsigned int nr_irqs_gsi; +extern unsigned int nr_irqs; +#define nr_static_irqs nr_irqs_gsi + +#define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ? \ + (1 << (irq)) & io_apic_irqs : \ + (irq) < nr_irqs_gsi) + +#define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs) + +#define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR) + +typedef struct { + DECLARE_BITMAP(_bits, X86_NR_VECTORS); +} vmask_t; + +struct irq_desc; + +struct arch_irq_desc { + s16 vector; /* vector itself is only 8 bits, */ + s16 old_vector; /* but we use -1 for unassigned */ + /* + * Except for high priority interrupts @cpu_mask may have bits set for + * offline CPUs. Consumers need to be careful to mask this down to + * online ones as necessary. There is supposed to always be a non- + * empty intersection with cpu_online_map. 
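+ * An (illustrative) consumer would therefore narrow the mask first:
+ *     cpumask_and(&tmp, desc->arch.cpu_mask, &cpu_online_map);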
+ */ + cpumask_var_t cpu_mask; + cpumask_var_t old_cpu_mask; + cpumask_var_t pending_mask; + vmask_t *used_vectors; + unsigned move_cleanup_count; + u8 move_in_progress : 1; + s8 used; + /* + * Weak reference to domain having permission over this IRQ (which can + * be different from the domain actually having the IRQ assigned) + */ + domid_t creator_domid; +}; + +/* For use with irq_desc.arch.used */ +#define IRQ_UNUSED (0) +#define IRQ_USED (1) +#define IRQ_RESERVED (-1) + +#define IRQ_VECTOR_UNASSIGNED (-1) + +typedef int vector_irq_t[X86_NR_VECTORS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); + +extern bool opt_noirqbalance; + +#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */ +#define OPT_IRQ_VECTOR_MAP_NONE 1 /* None */ +#define OPT_IRQ_VECTOR_MAP_GLOBAL 2 /* One global vector map (no vector sharing) */ +#define OPT_IRQ_VECTOR_MAP_PERDEV 3 /* Per-device vector map (no vector sharing w/in a device) */ + +extern int opt_irq_vector_map; + +/* + * Per-cpu current frame pointer - the location of the last exception frame on + * the stack + */ +DECLARE_PER_CPU(struct cpu_user_regs *, __irq_regs); + +static inline struct cpu_user_regs *get_irq_regs(void) +{ + return this_cpu(__irq_regs); +} + +static inline struct cpu_user_regs *set_irq_regs(struct cpu_user_regs *new_regs) +{ + struct cpu_user_regs *old_regs, **pp_regs = &this_cpu(__irq_regs); + + old_regs = *pp_regs; + *pp_regs = new_regs; + return old_regs; +} + + +#define platform_legacy_irq(irq) ((irq) < 16) + +void event_check_interrupt(struct cpu_user_regs *regs); +void invalidate_interrupt(struct cpu_user_regs *regs); +void call_function_interrupt(struct cpu_user_regs *regs); +void apic_timer_interrupt(struct cpu_user_regs *regs); +void error_interrupt(struct cpu_user_regs *regs); +void pmu_apic_interrupt(struct cpu_user_regs *regs); +void spurious_interrupt(struct cpu_user_regs *regs); +void irq_move_cleanup_interrupt(struct cpu_user_regs *regs); + +uint8_t alloc_hipriority_vector(void); + +void set_direct_apic_vector( + uint8_t vector, void (*handler)(struct cpu_user_regs *)); +void alloc_direct_apic_vector( + uint8_t *vector, void (*handler)(struct cpu_user_regs *)); + +void do_IRQ(struct cpu_user_regs *regs); + +void disable_8259A_irq(struct irq_desc *); +void enable_8259A_irq(struct irq_desc *); +int i8259A_irq_pending(unsigned int irq); +void mask_8259A(void); +void unmask_8259A(void); +void init_8259A(int aeoi); +void make_8259A_irq(unsigned int irq); +bool bogus_8259A_irq(unsigned int irq); +int i8259A_suspend(void); +int i8259A_resume(void); + +void setup_IO_APIC(void); +void disable_IO_APIC(void); +void setup_ioapic_dest(void); +vmask_t *io_apic_get_used_vector_map(unsigned int irq); + +extern unsigned int io_apic_irqs; + +DECLARE_PER_CPU(unsigned int, irq_count); + +struct pirq; +struct arch_pirq { + int irq; + union { + struct hvm_pirq { + int emuirq; + struct hvm_pirq_dpci dpci; + } hvm; + }; +}; + +#define pirq_dpci(pirq) ((pirq) ? 
&(pirq)->arch.hvm.dpci : NULL) +#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci) + +int pirq_shared(struct domain *d , int irq); + +int map_domain_pirq(struct domain *d, int pirq, int irq, int type, + void *data); +int unmap_domain_pirq(struct domain *d, int pirq); +int get_free_pirq(struct domain *d, int type); +int get_free_pirqs(struct domain *, unsigned int nr); +void free_domain_pirqs(struct domain *d); +int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq); +int unmap_domain_pirq_emuirq(struct domain *d, int pirq); + +/* Reset irq affinities to match the given CPU mask. */ +void fixup_irqs(const cpumask_t *mask, bool verbose); +void fixup_eoi(void); + +int init_irq_data(void); + +void clear_irq_vector(int irq); + +int irq_to_vector(int irq); +/* + * If grant_access is set the current domain is given permissions over + * the created IRQ. + */ +int create_irq(nodeid_t node, bool grant_access); +void destroy_irq(unsigned int irq); +int assign_irq_vector(int irq, const cpumask_t *); + +extern void irq_complete_move(struct irq_desc *); + +extern struct irq_desc *irq_desc; + +void lock_vector_lock(void); +void unlock_vector_lock(void); + +void setup_vector_irq(unsigned int cpu); + +void move_native_irq(struct irq_desc *); +void move_masked_irq(struct irq_desc *); + +int bind_irq_vector(int irq, int vector, const cpumask_t *); + +void end_nonmaskable_irq(struct irq_desc *, uint8_t vector); +void irq_set_affinity(struct irq_desc *, const cpumask_t *mask); + +int init_domain_irq_mapping(struct domain *); +void cleanup_domain_irq_mapping(struct domain *); + +#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq, 0) +#define domain_irq_to_pirq(d, irq) ({ \ + void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq); \ + __ret ? radix_tree_ptr_to_int(__ret) : 0; \ +}) +#define PIRQ_ALLOCATED -1 +#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq, \ + arch.hvm.emuirq, IRQ_UNBOUND) +#define domain_emuirq_to_pirq(d, emuirq) ({ \ + void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\ + __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND; \ +}) +#define IRQ_UNBOUND -1 +#define IRQ_PT -2 +#define IRQ_MSI_EMU -3 + +bool cpu_has_pending_apic_eoi(void); + +static inline void arch_move_irqs(struct vcpu *v) { } + +struct msi_info; +int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p); +int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, + int type, struct msi_info *msi); + +#endif /* _ASM_HW_IRQ_H */ diff --git a/xen/arch/x86/include/asm/ldt.h b/xen/arch/x86/include/asm/ldt.h new file mode 100644 index 0000000000..58e3e042fc --- /dev/null +++ b/xen/arch/x86/include/asm/ldt.h @@ -0,0 +1,35 @@ + +#ifndef __ARCH_LDT_H +#define __ARCH_LDT_H + +#ifndef __ASSEMBLY__ + +static inline void load_LDT(struct vcpu *v) +{ + seg_desc_t *desc; + unsigned long ents; + + if ( (ents = v->arch.pv.ldt_ents) == 0 ) + lldt(0); + else + { + desc = (!is_pv_32bit_vcpu(v) ? 
this_cpu(gdt) : this_cpu(compat_gdt)) + + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY; + _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt); + lldt(LDT_SELECTOR); + } +} + +#endif /* !__ASSEMBLY__ */ + +#endif + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/livepatch.h b/xen/arch/x86/include/asm/livepatch.h new file mode 100644 index 0000000000..00aefd2d63 --- /dev/null +++ b/xen/arch/x86/include/asm/livepatch.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +#ifndef __XEN_X86_LIVEPATCH_H__ +#define __XEN_X86_LIVEPATCH_H__ + +#include /* For SZ_* macros. */ + +#define ARCH_PATCH_INSN_SIZE 5 +#define ARCH_LIVEPATCH_RANGE SZ_2G +#define LIVEPATCH_FEATURE X86_FEATURE_ALWAYS + +#endif /* __XEN_X86_LIVEPATCH_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/mach-default/bios_ebda.h b/xen/arch/x86/include/asm/mach-default/bios_ebda.h new file mode 100644 index 0000000000..42de6b2a5b --- /dev/null +++ b/xen/arch/x86/include/asm/mach-default/bios_ebda.h @@ -0,0 +1,15 @@ +#ifndef _MACH_BIOS_EBDA_H +#define _MACH_BIOS_EBDA_H + +/* + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. + */ +static inline unsigned int get_bios_ebda(void) +{ + unsigned int address = *(unsigned short *)maddr_to_virt(0x40E); + address <<= 4; + return address; /* 0 means none */ +} + +#endif /* _MACH_BIOS_EBDA_H */ diff --git a/xen/arch/x86/include/asm/mach-default/io_ports.h b/xen/arch/x86/include/asm/mach-default/io_ports.h new file mode 100644 index 0000000000..a96d9f6604 --- /dev/null +++ b/xen/arch/x86/include/asm/mach-default/io_ports.h @@ -0,0 +1,30 @@ +/* + * arch/i386/mach-generic/io_ports.h + * + * Machine specific IO port address definition for generic. + * Written by Osamu Tomita + */ +#ifndef _MACH_IO_PORTS_H +#define _MACH_IO_PORTS_H + +/* i8253A PIT registers */ +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 +#define PIT_CH2 0x42 + +/* i8259A PIC registers */ +#define PIC_MASTER_CMD 0x20 +#define PIC_MASTER_IMR 0x21 +#define PIC_MASTER_ISR PIC_MASTER_CMD +#define PIC_MASTER_POLL PIC_MASTER_ISR +#define PIC_MASTER_OCW3 PIC_MASTER_ISR +#define PIC_SLAVE_CMD 0xa0 +#define PIC_SLAVE_IMR 0xa1 + +/* i8259A PIC related value */ +#define PIC_CASCADE_IR 2 +#define MASTER_ICW4_DEFAULT 0x01 +#define SLAVE_ICW4_DEFAULT 0x01 +#define PIC_ICW4_AEOI 2 + +#endif /* !_MACH_IO_PORTS_H */ diff --git a/xen/arch/x86/include/asm/mach-default/irq_vectors.h b/xen/arch/x86/include/asm/mach-default/irq_vectors.h new file mode 100644 index 0000000000..f546aedd87 --- /dev/null +++ b/xen/arch/x86/include/asm/mach-default/irq_vectors.h @@ -0,0 +1,46 @@ +#ifndef _ASM_IRQ_VECTORS_H +#define _ASM_IRQ_VECTORS_H + +/* Processor-initiated interrupts are all high priority. */ +#define SPURIOUS_APIC_VECTOR 0xff +#define ERROR_APIC_VECTOR 0xfe +#define INVALIDATE_TLB_VECTOR 0xfd +#define EVENT_CHECK_VECTOR 0xfc +#define CALL_FUNCTION_VECTOR 0xfb +#define LOCAL_TIMER_VECTOR 0xfa +#define PMU_APIC_VECTOR 0xf9 +/* + * High-priority dynamically-allocated vectors. For interrupts that + * must be higher priority than any guest-bound interrupt. + */ +#define FIRST_HIPRIORITY_VECTOR 0xf1 +#define LAST_HIPRIORITY_VECTOR 0xf8 +/* IRQ0 (timer) is statically allocated but must be high priority. 
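+ * (0xf0 sits directly below the 0xf1-0xf8 high-priority range above, so
+ * IRQ0 still outranks every ordinary dynamic vector in 0x20-0xef.)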
*/ +#define IRQ0_VECTOR 0xf0 + +/* Legacy PIC uses vectors 0x20-0x2f. */ +#define FIRST_LEGACY_VECTOR FIRST_DYNAMIC_VECTOR +#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf) + +#ifdef CONFIG_PV32 +#define HYPERCALL_VECTOR 0x82 +#endif + +#define LEGACY_SYSCALL_VECTOR 0x80 + +/* + * Dynamically-allocated vectors available to any driver. Note that the + * legacy vector range is a sub-range of this one, re-used on CPUs not + * sharing vectors with CPU 0. + */ +#define FIRST_DYNAMIC_VECTOR 0x20 +#define LAST_DYNAMIC_VECTOR 0xef +#define NR_DYNAMIC_VECTORS (LAST_DYNAMIC_VECTOR - FIRST_DYNAMIC_VECTOR + 1) + +/* There's no IRQ2 at the PIC. */ +#define IRQ_MOVE_CLEANUP_VECTOR (FIRST_LEGACY_VECTOR + 2) + +#define FIRST_IRQ_VECTOR FIRST_DYNAMIC_VECTOR +#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR + +#endif /* _ASM_IRQ_VECTORS_H */ diff --git a/xen/arch/x86/include/asm/mach-default/mach_mpspec.h b/xen/arch/x86/include/asm/mach-default/mach_mpspec.h new file mode 100644 index 0000000000..1a4e3f8c4f --- /dev/null +++ b/xen/arch/x86/include/asm/mach-default/mach_mpspec.h @@ -0,0 +1,10 @@ +#ifndef __ASM_MACH_MPSPEC_H +#define __ASM_MACH_MPSPEC_H + +#define MAX_IRQ_SOURCES 256 + +/* Generic (i.e. installer) kernels need lots of bus entries. */ +/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ +#define MAX_MP_BUSSES 260 + +#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/xen/arch/x86/include/asm/mach-generic/mach_apic.h b/xen/arch/x86/include/asm/mach-generic/mach_apic.h new file mode 100644 index 0000000000..b6f6361c60 --- /dev/null +++ b/xen/arch/x86/include/asm/mach-generic/mach_apic.h @@ -0,0 +1,80 @@ +#ifndef __ASM_MACH_APIC_H +#define __ASM_MACH_APIC_H + +#include +#include +#include +#include + +/* ESR was originally disabled in Linux for NUMA-Q. Do we really need to? */ +#define esr_disable (0) + +/* The following are dependent on APIC delivery mode (logical vs. physical). */ +#define INT_DELIVERY_MODE (genapic.int_delivery_mode) +#define INT_DEST_MODE (genapic.int_dest_mode) +#define TARGET_CPUS ((const typeof(cpu_online_map) *)&cpu_online_map) +#define init_apic_ldr (genapic.init_apic_ldr) +#define cpu_mask_to_apicid(mask) ({ \ + /* \ + * There are a number of places where the address of a local variable \ + * gets passed here. The use of ?: in alternative_call() triggers an \ + * "address of ... is always true" warning in such a case with at least \ + * gcc 7 and 8. Hence the seemingly pointless local variable here. \ + */ \ + const cpumask_t *m_ = (mask); \ + alternative_call(genapic.cpu_mask_to_apicid, m_); \ +}) +#define vector_allocation_cpumask(cpu) \ + alternative_call(genapic.vector_allocation_cpumask, cpu) + +static inline void enable_apic_mode(void) +{ + /* Not needed for modern ES7000 which boot in Virtual Wire mode. */ + /*es7000_sw_apic();*/ +} + +#define apicid_to_node(apicid) ((int)apicid_to_node[(u32)apicid]) + +extern u32 bios_cpu_apicid[]; + +static inline int multi_timer_check(int apic, int irq) +{ + return 0; +} + +extern void generic_apic_probe(void); +extern void generic_bigsmp_probe(void); + +/* + * The following functions based around phys_cpu_present_map are disabled in + * some i386 Linux subarchitectures, and in x86_64 'cluster' genapic mode. I'm + * really not sure why, since all local APICs should have distinct physical + * IDs, and we need to know what they are. 
+ */ +static inline int apic_id_registered(void) +{ + return physid_isset(get_apic_id(), + phys_cpu_present_map); +} + +static inline void ioapic_phys_id_map(physid_mask_t *map) +{ + *map = phys_cpu_present_map; +} + +static inline int check_apicid_used(const physid_mask_t *map, int apicid) +{ + return physid_isset(apicid, *map); +} + +static inline int check_apicid_present(int apicid) +{ + return physid_isset(apicid, phys_cpu_present_map); +} + +static inline void set_apicid(int phys_apicid, physid_mask_t *map) +{ + physid_set(phys_apicid, *map); +} + +#endif /* __ASM_MACH_APIC_H */ diff --git a/xen/arch/x86/include/asm/machine_kexec.h b/xen/arch/x86/include/asm/machine_kexec.h new file mode 100644 index 0000000000..ba0d469d07 --- /dev/null +++ b/xen/arch/x86/include/asm/machine_kexec.h @@ -0,0 +1,16 @@ +#ifndef __X86_MACHINE_KEXEC_H__ +#define __X86_MACHINE_KEXEC_H__ + +#define KEXEC_RELOC_FLAG_COMPAT 0x1 /* 32-bit image */ + +#ifndef __ASSEMBLY__ + +extern void kexec_reloc(unsigned long reloc_code, unsigned long reloc_pt, + unsigned long ind_maddr, unsigned long entry_maddr, + unsigned long flags); + +extern unsigned int kexec_reloc_size; + +#endif + +#endif /* __X86_MACHINE_KEXEC_H__ */ diff --git a/xen/arch/x86/include/asm/mc146818rtc.h b/xen/arch/x86/include/asm/mc146818rtc.h new file mode 100644 index 0000000000..803b236c0a --- /dev/null +++ b/xen/arch/x86/include/asm/mc146818rtc.h @@ -0,0 +1,116 @@ +/* + * Machine dependent access functions for RTC registers. + */ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +#include +#include + +extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_SECONDS 0 +#define RTC_SECONDS_ALARM 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + +#define RTC_DAY_OF_WEEK 6 +#define RTC_DAY_OF_MONTH 7 +#define RTC_MONTH 8 +#define RTC_YEAR 9 + +/* control registers - Moto names + */ +#define RTC_REG_A 10 +#define RTC_REG_B 11 +#define RTC_REG_C 12 +#define RTC_REG_D 13 + +/********************************************************************** + * register details + **********************************************************************/ +#define RTC_FREQ_SELECT RTC_REG_A + +/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, + * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, + * totalling to a max high interval of 2.228 ms. + */ +# define RTC_UIP 0x80 +# define RTC_DIV_CTL 0x70 + /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ +# define RTC_REF_CLCK_4MHZ 0x00 +# define RTC_REF_CLCK_1MHZ 0x10 +# define RTC_REF_CLCK_32KHZ 0x20 + /* 2 values for divider stage reset, others for "testing purposes only" */ +# define RTC_DIV_RESET1 0x60 +# define RTC_DIV_RESET2 0x70 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 
15=2Hz */ +# define RTC_RATE_SELECT 0x0F + +/**********************************************************************/ +#define RTC_CONTROL RTC_REG_B +# define RTC_SET 0x80 /* disable updates for clock setting */ +# define RTC_PIE 0x40 /* periodic interrupt enable */ +# define RTC_AIE 0x20 /* alarm interrupt enable */ +# define RTC_UIE 0x10 /* update-finished interrupt enable */ +# define RTC_SQWE 0x08 /* enable square-wave output */ +# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ + +/**********************************************************************/ +#define RTC_INTR_FLAGS RTC_REG_C +/* caution - cleared by read */ +# define RTC_IRQF 0x80 /* any of the following 3 is active */ +# define RTC_PF 0x40 +# define RTC_AF 0x20 +# define RTC_UF 0x10 + +/**********************************************************************/ +#define RTC_VALID RTC_REG_D +# define RTC_VRT 0x80 /* valid RAM and time */ +/**********************************************************************/ + +/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) + * determines if the following two #defines are needed + */ +#ifndef BCD_TO_BIN +#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) +#endif + +#ifndef BIN_TO_BCD +#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) +#endif + + +#ifndef RTC_PORT +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ +#endif + +/* + * The yet supported machines all access the RTC index register via + * an ISA port access but the way to access the date register differs ... + */ +#define CMOS_READ(addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +inb_p(RTC_PORT(1)); \ +}) +#define CMOS_WRITE(val, addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +outb_p((val),RTC_PORT(1)); \ +}) + +#define RTC_IRQ 8 + +unsigned int rtc_guest_read(unsigned int port); +void rtc_guest_write(unsigned int port, unsigned int data); + +#endif /* _ASM_MC146818RTC_H */ diff --git a/xen/arch/x86/include/asm/mce.h b/xen/arch/x86/include/asm/mce.h new file mode 100644 index 0000000000..2c63318c08 --- /dev/null +++ b/xen/arch/x86/include/asm/mce.h @@ -0,0 +1,49 @@ +#ifndef _XEN_X86_MCE_H +#define _XEN_X86_MCE_H + +#include +#include + +/* + * Emulate 2 banks for guest + * Bank0: reserved for 'bank0 quirk' occur at some very old processors: + * 1). Intel cpu whose family-model value < 06-1A; + * 2). 
AMD K7
+ * Bank1: used to transfer error info to guest
+ */
+#define GUEST_MC_BANK_NUM 2
+
+/* Filter MSCOD model specific error code to guest */
+#define MCi_STATUS_MSCOD_MASK (~(0xffffULL << 16))
+
+/* No mci_ctl since it is stuck at all 1's */
+struct vmce_bank {
+    uint64_t mci_status;
+    uint64_t mci_addr;
+    uint64_t mci_misc;
+    uint64_t mci_ctl2;
+};
+
+/* No mcg_ctl since it is not exposed to the guest */
+struct vmce {
+    uint64_t mcg_cap;
+    uint64_t mcg_status;
+    uint64_t mcg_ext_ctl;
+    spinlock_t lock;
+    struct vmce_bank bank[GUEST_MC_BANK_NUM];
+};
+
+struct domain;
+struct vcpu;
+
+/* Guest vMCE MSRs virtualization */
+extern void vmce_init_vcpu(struct vcpu *);
+extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
+extern int vmce_wrmsr(uint32_t msr, uint64_t val);
+extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
+extern bool vmce_has_lmce(const struct vcpu *v);
+extern int vmce_enable_mca_cap(struct domain *d, uint64_t cap);
+
+DECLARE_PER_CPU(unsigned int, nr_mce_banks);
+
+#endif
diff --git a/xen/arch/x86/include/asm/mem_access.h b/xen/arch/x86/include/asm/mem_access.h
new file mode 100644
index 0000000000..18091610ae
--- /dev/null
+++ b/xen/arch/x86/include/asm/mem_access.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+ * include/asm-x86/mem_access.h
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see .
+ */
+
+#ifndef __ASM_X86_MEM_ACCESS_H__
+#define __ASM_X86_MEM_ACCESS_H__
+
+/*
+ * Set up a vm_event request based on the access (gla is -1ull if not
+ * available). Handles the rw2rx conversion. The boolean return value
+ * indicates whether the event type is synchronous (i.e. requires a vCPU
+ * pause). If req_ptr has been populated, the caller should use
+ * monitor_traps to send the event on the MONITOR ring. Once the get_gfn*
+ * locks have been released, the caller must also xfree the request.
+ */
+bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+                          struct npfec npfec,
+                          struct vm_event_st **req_ptr);
+
+/* Check for emulation and mark vcpu for skipping one instruction
+ * upon rescheduling if required.
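+ * (As a sketch of the flow: a vm_event responder asks for emulation by
+ * setting the emulate flag in its response; this helper, invoked on the
+ * reply path, is what arranges the one-instruction emulation before the
+ * vCPU resumes.)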
*/ +bool p2m_mem_access_emulate_check(struct vcpu *v, + const struct vm_event_st *rsp); + +/* Sanity check for mem_access hardware support */ +bool p2m_mem_access_sanity_check(const struct domain *d); + +int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve, + unsigned int altp2m_idx); + +struct xen_hvm_altp2m_suppress_ve_multi; +int p2m_set_suppress_ve_multi(struct domain *d, + struct xen_hvm_altp2m_suppress_ve_multi *suppress_ve); + +int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve, + unsigned int altp2m_idx); + +#endif /*__ASM_X86_MEM_ACCESS_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/mem_paging.h b/xen/arch/x86/include/asm/mem_paging.h new file mode 100644 index 0000000000..d3635e96cf --- /dev/null +++ b/xen/arch/x86/include/asm/mem_paging.h @@ -0,0 +1,42 @@ +/****************************************************************************** + * include/asm-x86/mem_paging.h + * + * Memory paging support. + * + * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ + +#ifndef __ASM_X86_MEM_PAGING_H__ +#define __ASM_X86_MEM_PAGING_H__ + +int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg); + +#ifdef CONFIG_MEM_PAGING +# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging) +#else +# define mem_paging_enabled(d) false +#endif + +#endif /*__ASM_X86_MEM_PAGING_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/mem_sharing.h b/xen/arch/x86/include/asm/mem_sharing.h new file mode 100644 index 0000000000..cf7a12f4d2 --- /dev/null +++ b/xen/arch/x86/include/asm/mem_sharing.h @@ -0,0 +1,153 @@ +/****************************************************************************** + * include/asm-x86/mem_sharing.h + * + * Memory sharing support. + * + * Copyright (c) 2009 Citrix Systems, Inc. (Grzegorz Milos) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ +#ifndef __MEM_SHARING_H__ +#define __MEM_SHARING_H__ + +#include +#include + +#ifdef CONFIG_MEM_SHARING + +#define mem_sharing_enabled(d) ((d)->arch.hvm.mem_sharing.enabled) + +/* Auditing of memory sharing code? 
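+ * (Debug builds only: with NDEBUG unset, MEM_SHARING_AUDIT below
+ * evaluates to 1 and the shared-page list / RCU bookkeeping in
+ * struct page_sharing_info becomes active.)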
*/ +#ifndef NDEBUG +#define MEM_SHARING_AUDIT 1 +#else +#define MEM_SHARING_AUDIT 0 +#endif + +typedef uint64_t shr_handle_t; + +typedef struct rmap_hashtab { + struct list_head *bucket; + /* + * Overlaps with prev pointer of list_head in union below. + * Unlike the prev pointer, this can be NULL. + */ + void *flag; +} rmap_hashtab_t; + +struct page_sharing_info +{ + struct page_info *pg; /* Back pointer to the page. */ + shr_handle_t handle; /* Globally unique version / handle. */ +#if MEM_SHARING_AUDIT + struct list_head entry; /* List of all shared pages (entry). */ + struct rcu_head rcu_head; /* List of all shared pages (entry). */ +#endif + /* Reverse map of tuples for this shared frame. */ + union { + struct list_head gfns; + rmap_hashtab_t hash_table; + }; +}; + +unsigned int mem_sharing_get_nr_saved_mfns(void); +unsigned int mem_sharing_get_nr_shared_mfns(void); + +/* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */ +int __mem_sharing_unshare_page(struct domain *d, + unsigned long gfn, + bool destroy); + +static inline int mem_sharing_unshare_page(struct domain *d, + unsigned long gfn) +{ + int rc = __mem_sharing_unshare_page(d, gfn, false); + BUG_ON(rc && (rc != -ENOMEM)); + return rc; +} + +static inline bool mem_sharing_is_fork(const struct domain *d) +{ + return d->parent; +} + +int mem_sharing_fork_page(struct domain *d, gfn_t gfn, + bool unsharing); + +/* + * If called by a foreign domain, possible errors are + * -EBUSY -> ring full + * -ENOSYS -> no ring to begin with + * and the foreign mapper is responsible for retrying. + * + * If called by the guest vcpu itself and allow_sleep is set, may + * sleep on a wait queue, so the caller is responsible for not + * holding locks on entry. It may only fail with ENOSYS + * + * If called by the guest vcpu itself and allow_sleep is not set, + * then it's the same as a foreign domain. + */ +int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, + bool allow_sleep); +int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg); +int mem_sharing_domctl(struct domain *d, + struct xen_domctl_mem_sharing_op *mec); + +/* + * Scans the p2m and relinquishes any shared pages, destroying + * those for which this domain holds the final reference. + * Preemptible. + */ +int relinquish_shared_pages(struct domain *d); + +#else + +#define mem_sharing_enabled(d) false + +static inline unsigned int mem_sharing_get_nr_saved_mfns(void) +{ + return 0; +} + +static inline unsigned int mem_sharing_get_nr_shared_mfns(void) +{ + return 0; +} + +static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn) +{ + ASSERT_UNREACHABLE(); + return -EOPNOTSUPP; +} + +static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, + bool allow_sleep) +{ + ASSERT_UNREACHABLE(); + return -EOPNOTSUPP; +} + +static inline bool mem_sharing_is_fork(const struct domain *d) +{ + return false; +} + +static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif /* __MEM_SHARING_H__ */ diff --git a/xen/arch/x86/include/asm/microcode.h b/xen/arch/x86/include/asm/microcode.h new file mode 100644 index 0000000000..3b0234e9fa --- /dev/null +++ b/xen/arch/x86/include/asm/microcode.h @@ -0,0 +1,27 @@ +#ifndef ASM_X86__MICROCODE_H +#define ASM_X86__MICROCODE_H + +#include +#include + +#include + +struct cpu_signature { + /* CPU signature (CPUID.1.EAX). */ + unsigned int sig; + + /* Platform Flags. Only applicable to Intel. 
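+     * (Background, not derived from this file: on Intel this is the
+     * 1 << IA32_PLATFORM_ID[52:50] value, matched against the
+     * processor-flags field of a microcode update header.)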
*/ + unsigned int pf; + + /* Microcode Revision. */ + unsigned int rev; +}; + +DECLARE_PER_CPU(struct cpu_signature, cpu_sig); + +void microcode_set_module(unsigned int idx); +int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len); +int early_microcode_init(void); +int microcode_update_one(void); + +#endif /* ASM_X86__MICROCODE_H */ diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h new file mode 100644 index 0000000000..cb90527499 --- /dev/null +++ b/xen/arch/x86/include/asm/mm.h @@ -0,0 +1,655 @@ + +#ifndef __ASM_X86_MM_H__ +#define __ASM_X86_MM_H__ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Per-page-frame information. + * + * Every architecture must ensure the following: + * 1. 'struct page_info' contains a 'struct page_list_entry list'. + * 2. Provide a PFN_ORDER() macro for accessing the order of a free page. + */ +#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) + +#define PG_shift(idx) (BITS_PER_LONG - (idx)) +#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) + + /* The following page types are MUTUALLY EXCLUSIVE. */ +#define PGT_none PG_mask(0, 3) /* no special uses of this page */ +#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */ +#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */ +#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */ +#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */ +#define PGT_seg_desc_page PG_mask(5, 3) /* using this page in a GDT/LDT? */ +#define PGT_shared_page PG_mask(6, 3) /* CoW sharable page */ +#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */ +#define PGT_type_mask PG_mask(7, 3) /* Bits 61-63. */ + + /* Page is locked? */ +#define _PGT_locked PG_shift(4) +#define PGT_locked PG_mask(1, 4) + /* Owning guest has pinned this page to its current type? */ +#define _PGT_pinned PG_shift(5) +#define PGT_pinned PG_mask(1, 5) + /* Has this page been validated for use as its current type? */ +#define _PGT_validated PG_shift(6) +#define PGT_validated PG_mask(1, 6) + /* PAE only: is this an L2 page directory containing Xen-private mappings? */ +#ifdef CONFIG_PV32 +#define _PGT_pae_xen_l2 PG_shift(7) +#define PGT_pae_xen_l2 PG_mask(1, 7) +#else +#define PGT_pae_xen_l2 0 +#endif +/* Has this page been *partially* validated for use as its current type? */ +#define _PGT_partial PG_shift(8) +#define PGT_partial PG_mask(1, 8) + + /* Count of uses of this frame as its current type. */ +#define PGT_count_width PG_shift(8) +#define PGT_count_mask ((1UL<count_info&PGC_state) == PGC_state_##st) +/* Page is not reference counted (see below for caveats) */ +#define _PGC_extra PG_shift(10) +#define PGC_extra PG_mask(1, 10) + +/* Count of references to this frame. */ +#define PGC_count_width PG_shift(10) +#define PGC_count_mask ((1UL<count_info & PGC_xen_heap) +#define is_xen_heap_mfn(mfn) \ + (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) +#define is_xen_fixed_mfn(mfn) \ + (((mfn_to_maddr(mfn)) >= __pa(_stext)) && \ + ((mfn_to_maddr(mfn)) <= __pa(__2M_rwdata_end - 1))) + +#define PRtype_info "016lx"/* should only be used for printk's */ + +/* The number of out-of-sync shadows we allow per vcpu (prime, please) */ +#define SHADOW_OOS_PAGES 3 + +/* OOS fixup entries */ +#define SHADOW_OOS_FIXUPS 2 + +#define page_get_owner(_p) \ + ((struct domain *)((_p)->v.inuse._domain ? \ + pdx_to_virt((_p)->v.inuse._domain) : NULL)) +#define page_set_owner(_p,_d) \ + ((_p)->v.inuse._domain = (_d) ? 
virt_to_pdx(_d) : 0) + +#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma)))) + +#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) +extern unsigned long max_page; +extern unsigned long total_pages; +void init_frametable(void); + +#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT + +/* Convert between Xen-heap virtual addresses and page-info structures. */ +static inline struct page_info *__virt_to_page(const void *v) +{ + unsigned long va = (unsigned long)v; + + ASSERT(va >= XEN_VIRT_START); + ASSERT(va < DIRECTMAP_VIRT_END); + if ( va < XEN_VIRT_END ) + va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start; + else + ASSERT(va >= DIRECTMAP_VIRT_START); + return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT); +} + +static inline void *__page_to_virt(const struct page_info *pg) +{ + ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE); + /* + * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The + * division and re-multiplication avoids one shift when sizeof(*pg) is a + * power of two (otherwise there would be a right shift followed by a + * left shift, which the compiler can't know it can fold into one). + */ + return (void *)(DIRECTMAP_VIRT_START + + ((unsigned long)pg - FRAMETABLE_VIRT_START) / + (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) * + (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg)))); +} + +int devalidate_page(struct page_info *page, unsigned long type, + int preemptible); + +void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d); +void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, + const struct domain *d, mfn_t sl4mfn, bool ro_mpt); +bool fill_ro_mpt(mfn_t mfn); +void zap_ro_mpt(mfn_t mfn); + +bool is_iomem_page(mfn_t mfn); + +/* + * Pages with no owner which may get passed to functions wanting to + * refcount them can be marked PGC_extra to bypass this refcounting (which + * would fail due to the lack of an owner). + * + * (For pages with owner PGC_extra has different meaning.) + */ +static inline void page_suppress_refcounting(struct page_info *pg) +{ + ASSERT(!page_get_owner(pg)); + pg->count_info |= PGC_extra; +} + +static inline bool page_refcounting_suppressed(const struct page_info *pg) +{ + return !page_get_owner(pg) && (pg->count_info & PGC_extra); +} + +struct platform_bad_page { + unsigned long mfn; + unsigned int order; +}; + +const struct platform_bad_page *get_platform_badpages(unsigned int *array_size); + +/* Per page locks: + * page_lock() is used for pte serialization. + * + * All users of page lock for pte serialization live in mm.c, use it + * to lock a page table page during pte updates, do not take other locks within + * the critical section delimited by page_lock/unlock, and perform no + * nesting. + * + * The use of PGT_locked in mem_sharing does not collide, since mem_sharing is + * only supported for hvm guests, which do not have PV PTEs updated. 
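+ *
+ * Minimal usage sketch (illustrative only; a caller failing to take the
+ * lock simply fails the update):
+ *
+ *     if ( !page_lock(pg) )
+ *         return 0;
+ *     ... validate and update the PTE within this page table page ...
+ *     page_unlock(pg);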
+ */
+int page_lock(struct page_info *page);
+void page_unlock(struct page_info *page);
+
+void put_page_type(struct page_info *page);
+int get_page_type(struct page_info *page, unsigned long type);
+int put_page_type_preemptible(struct page_info *page);
+int get_page_type_preemptible(struct page_info *page, unsigned long type);
+int put_old_guest_table(struct vcpu *);
+int get_page_from_l1e(
+    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
+void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
+
+static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
+{
+    struct page_info *page = mfn_to_page(mfn);
+
+    if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
+        return NULL;
+    }
+
+    return page;
+}
+
+static inline void put_page_and_type(struct page_info *page)
+{
+    put_page_type(page);
+    put_page(page);
+}
+
+static inline int put_page_and_type_preemptible(struct page_info *page)
+{
+    int rc = put_page_type_preemptible(page);
+
+    if ( likely(rc == 0) )
+        put_page(page);
+    return rc;
+}
+
+static inline int get_page_and_type(struct page_info *page,
+                                    struct domain *domain,
+                                    unsigned long type)
+{
+    int rc = get_page(page, domain);
+
+    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
+    {
+        put_page(page);
+        rc = 0;
+    }
+
+    return rc;
+}
+
+#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
+    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
+    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
+#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
+    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
+    ASSERT(page_get_owner(_p) == (_d))
+
+extern paddr_t mem_hotplug;
+
+/******************************************************************************
+ * With shadow pagetables, the different kinds of address start
+ * to get confusing.
+ *
+ * Virtual addresses are what they usually are: the addresses that are used
+ * to access memory while the guest is running. The MMU translates from
+ * virtual addresses to machine addresses.
+ *
+ * (Pseudo-)physical addresses are the abstraction of physical memory the
+ * guest uses for allocation and so forth. For the purposes of this code,
+ * we can largely ignore them.
+ *
+ * Guest frame numbers (gfns) are the entries that the guest puts in its
+ * pagetables. For normal paravirtual guests, they are actual frame numbers,
+ * with the translation done by the guest.
+ *
+ * Machine frame numbers (mfns) are the entries that the hypervisor puts
+ * in the shadow page tables.
+ *
+ * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
+ * to a "machine frame number, from the guest's perspective", or in other
+ * words, pseudo-physical frame numbers. However, in the shadow code, the
+ * term "gmfn" means "the mfn of a guest page"; this combines naturally with
+ * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
+ * guest L2 page), etc...
+ */
+
+/*
+ * The MPT (machine->physical mapping table) is an array of word-sized
+ * values, indexed on machine frame number. It is expected that guest OSes
+ * will use it to store a "physical" frame number to give the appearance of
+ * contiguous (or near contiguous) physical memory.
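+ *
+ * Illustrative only, using the accessors defined below:
+ *
+ *     unsigned long gpfn = get_gpfn_from_mfn(mfn);
+ *
+ *     if ( SHARED_M2P(gpfn) )
+ *         ... the frame is shared and has no single backpointer ...
+ *     else if ( VALID_M2P(gpfn) )
+ *         ... gpfn is the guest's view of frame mfn ...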
+ */ +#undef machine_to_phys_mapping +#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START) +#define INVALID_M2P_ENTRY (~0UL) +#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1)))) +#define SHARED_M2P_ENTRY (~0UL - 1UL) +#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) + +/* + * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until + * the machine_to_phys_mapping is actually set up. + */ +extern bool machine_to_phys_mapping_valid; + +void set_gpfn_from_mfn(unsigned long mfn, unsigned long pfn); + +extern struct rangeset *mmio_ro_ranges; + +#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)]) + +#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) +#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) + +#ifdef MEMORY_GUARD +void memguard_guard_range(void *p, unsigned long l); +void memguard_unguard_range(void *p, unsigned long l); +#else +#define memguard_guard_range(_p,_l) ((void)0) +#define memguard_unguard_range(_p,_l) ((void)0) +#endif + +void memguard_guard_stack(void *p); +void memguard_unguard_stack(void *p); + +struct mmio_ro_emulate_ctxt { + unsigned long cr2; + unsigned int seg, bdf; +}; + +extern int mmio_ro_emulated_write(enum x86_segment seg, + unsigned long offset, + void *p_data, + unsigned int bytes, + struct x86_emulate_ctxt *ctxt); +extern int mmcfg_intercept_write(enum x86_segment seg, + unsigned long offset, + void *p_data, + unsigned int bytes, + struct x86_emulate_ctxt *ctxt); + +int audit_adjust_pgtables(struct domain *d, int dir, int noisy); + +extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs); +extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs); + +#ifndef NDEBUG + +#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 ) +#define AUDIT_ERRORS_OK ( 1u << 1 ) +#define AUDIT_QUIET ( 1u << 2 ) + +void _audit_domain(struct domain *d, int flags); +#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK) +void audit_domains(void); + +#else + +#define _audit_domain(_d, _f) ((void)0) +#define audit_domain(_d) ((void)0) +#define audit_domains() ((void)0) + +#endif + +void make_cr3(struct vcpu *v, mfn_t mfn); +void update_cr3(struct vcpu *v); +int vcpu_destroy_pagetables(struct vcpu *); +void *do_page_walk(struct vcpu *v, unsigned long addr); + +/* Allocator functions for Xen pagetables. */ +mfn_t alloc_xen_pagetable(void); +void free_xen_pagetable(mfn_t mfn); +void *alloc_mapped_pagetable(mfn_t *pmfn); + +l1_pgentry_t *virt_to_xen_l1e(unsigned long v); + +int __sync_local_execstate(void); + +/* Arch-specific portion of memory_op hypercall. 
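+ * (Reached, as a sketch of the call path, from the common do_memory_op()
+ * for XENMEM_* subops that the common code does not handle itself.)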
*/ +long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg); +long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg); +int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void)); +int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void)); + +#define NIL(type) ((type *)-sizeof(type)) +#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr)))) + +int create_perdomain_mapping(struct domain *, unsigned long va, + unsigned int nr, l1_pgentry_t **, + struct page_info **); +void destroy_perdomain_mapping(struct domain *, unsigned long va, + unsigned int nr); +void free_perdomain_mappings(struct domain *); + +extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm); + +void domain_set_alloc_bitsize(struct domain *d); +unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits); + +unsigned long domain_get_maximum_gpfn(struct domain *d); + +/* Definition of an mm lock: spinlock with extra fields for debugging */ +typedef struct mm_lock { + spinlock_t lock; + int unlock_level; + int locker; /* processor which holds the lock */ + const char *locker_function; /* func that took it */ +} mm_lock_t; + +typedef struct mm_rwlock { + percpu_rwlock_t lock; + int unlock_level; + int recurse_count; + int locker; /* CPU that holds the write lock */ + const char *locker_function; /* func that took it */ +} mm_rwlock_t; + +#define arch_free_heap_page(d, pg) \ + page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list) + +extern const char zero_page[]; + +/* Build a 32bit PSE page table using 4MB pages. */ +void write_32bit_pse_identmap(uint32_t *l2); + +/* + * x86 maps part of physical memory via the directmap region. + * Return whether the input MFN falls in that range. + */ +static inline bool arch_mfn_in_directmap(unsigned long mfn) +{ + unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END); + + return mfn <= (virt_to_mfn(eva - 1) + 1); +} + +#endif /* __ASM_X86_MM_H__ */ diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h new file mode 100644 index 0000000000..01c6d63bb9 --- /dev/null +++ b/xen/arch/x86/include/asm/monitor.h @@ -0,0 +1,126 @@ +/* + * include/asm-x86/monitor.h + * + * Arch-specific monitor_op domctl handler. + * + * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) + * Copyright (c) 2016, Bitdefender S.R.L. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . 
+ */ + +#ifndef __ASM_X86_MONITOR_H__ +#define __ASM_X86_MONITOR_H__ + +#include + +#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index)) + +struct monitor_msr_bitmap { + DECLARE_BITMAP(low, 8192); + DECLARE_BITMAP(hypervisor, 8192); + DECLARE_BITMAP(high, 8192); +}; + +static inline +void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace) +{ + d->arch.monitor.guest_request_userspace_enabled = allow_userspace; +} + +static inline +int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop) +{ + int rc = 0; + + switch ( mop->op ) + { + case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP: + domain_pause(d); + /* + * Enabling mem_access_emulate_each_rep without a vm_event subscriber + * is meaningless. + */ + if ( d->max_vcpus && d->vcpu[0] && d->vcpu[0]->arch.vm_event ) + d->arch.mem_access_emulate_each_rep = !!mop->event; + else + rc = -EINVAL; + + domain_unpause(d); + break; + + case XEN_DOMCTL_MONITOR_OP_CONTROL_REGISTERS: + d->arch.monitor.control_register_values = true; + break; + + default: + rc = -EOPNOTSUPP; + } + + return rc; +} + +static inline uint32_t arch_monitor_get_capabilities(struct domain *d) +{ + uint32_t capabilities = 0; + + /* + * At the moment only Intel and AMD HVM domains are supported. However, + * event delivery could be extended to PV domains. + */ + if ( !is_hvm_domain(d) ) + return capabilities; + + capabilities = ((1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) | + (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) | + (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) | + (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) | + (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) | + (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) | + (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) | + (1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED) | + (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT)); + + if ( hvm_is_singlestep_supported() ) + capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP); + + if ( hvm_has_set_descriptor_access_exiting() ) + capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS); + + return capabilities; +} + +int arch_monitor_domctl_event(struct domain *d, + struct xen_domctl_monitor_op *mop); + +#ifdef CONFIG_HVM + +int arch_monitor_init_domain(struct domain *d); + +void arch_monitor_cleanup_domain(struct domain *d); + +#else + +static inline int arch_monitor_init_domain(struct domain *d) +{ + return -EOPNOTSUPP; +} + +static inline void arch_monitor_cleanup_domain(struct domain *d) {} + +#endif + +bool monitored_msr(const struct domain *d, u32 msr); +bool monitored_msr_onchangeonly(const struct domain *d, u32 msr); + +#endif /* __ASM_X86_MONITOR_H__ */ diff --git a/xen/arch/x86/include/asm/mpspec.h b/xen/arch/x86/include/asm/mpspec.h new file mode 100644 index 0000000000..1246eece0b --- /dev/null +++ b/xen/arch/x86/include/asm/mpspec.h @@ -0,0 +1,73 @@ +#ifndef __ASM_MPSPEC_H +#define __ASM_MPSPEC_H + +#include +#include +#include + +extern unsigned char mp_bus_id_to_type[MAX_MP_BUSSES]; + +extern bool def_to_bigsmp; +extern unsigned int boot_cpu_physical_apicid; +extern bool smp_found_config; +extern void find_smp_config (void); +extern void get_smp_config (void); +extern unsigned char apic_version [MAX_APICS]; +extern int mp_irq_entries; +extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; +extern int mpc_default_type; +extern unsigned long mp_lapic_addr; +extern bool pic_mode; + +#ifdef CONFIG_ACPI +extern int mp_register_lapic(u32 id, bool enabled, bool hotplug); +extern void 
mp_unregister_lapic(uint32_t apic_id, uint32_t cpu); +extern void mp_register_lapic_address (u64 address); +extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); +extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); +extern void mp_config_acpi_legacy_irqs (void); +extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); +#endif /* CONFIG_ACPI */ + +#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) + +struct physid_mask +{ + unsigned long mask[PHYSID_ARRAY_SIZE]; +}; + +typedef struct physid_mask physid_mask_t; + +#define physid_set(physid, map) set_bit(physid, (map).mask) +#define physid_clear(physid, map) clear_bit(physid, (map).mask) +#define physid_isset(physid, map) test_bit(physid, (map).mask) +#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) + +#define first_physid(map) find_first_bit((map).mask, \ + MAX_APICS) +#define next_physid(id, map) find_next_bit((map).mask, \ + MAX_APICS, (id) + 1) +#define last_physid(map) ({ \ + const unsigned long *mask = (map).mask; \ + unsigned int id, last = MAX_APICS; \ + for (id = find_first_bit(mask, MAX_APICS); id < MAX_APICS; \ + id = find_next_bit(mask, MAX_APICS, (id) + 1)) \ + last = id; \ + last; \ +}) + +#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) +#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) +#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) +#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) +#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) +#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) +#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) + +#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } +#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } + +extern physid_mask_t phys_cpu_present_map; + +#endif + diff --git a/xen/arch/x86/include/asm/mpspec_def.h b/xen/arch/x86/include/asm/mpspec_def.h new file mode 100644 index 0000000000..b17ec41426 --- /dev/null +++ b/xen/arch/x86/include/asm/mpspec_def.h @@ -0,0 +1,188 @@ +#ifndef __ASM_MPSPEC_DEF_H +#define __ASM_MPSPEC_DEF_H + +/* + * Structure definitions for SMP machines following the + * Intel Multiprocessing Specification 1.1 and 1.4. + */ + +/* + * This tag identifies where the SMP configuration + * information is. + */ + +#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') + +#define MAX_MPC_ENTRY 1024 +#define MAX_APICS MAX(256, 4 * NR_CPUS) + +struct intel_mp_floating +{ + char mpf_signature[4]; /* "_MP_" */ + unsigned int mpf_physptr; /* Configuration table address */ + unsigned char mpf_length; /* Our length (paragraphs) */ + unsigned char mpf_specification;/* Specification version */ + unsigned char mpf_checksum; /* Checksum (makes sum 0) */ + unsigned char mpf_feature1; /* Standard or configuration ? 
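+     * (0 means a full MP configuration table is present at mpf_physptr;
+     * non-zero selects one of the default configurations listed near the
+     * end of this header.)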
*/ + unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ + unsigned char mpf_feature3; /* Unused (0) */ + unsigned char mpf_feature4; /* Unused (0) */ + unsigned char mpf_feature5; /* Unused (0) */ +}; + +struct mp_config_table +{ + char mpc_signature[4]; +#define MPC_SIGNATURE "PCMP" + unsigned short mpc_length; /* Size of table */ + char mpc_spec; /* 0x01 */ + char mpc_checksum; + char mpc_oem[8]; + char mpc_productid[12]; + unsigned int mpc_oemptr; /* 0 if not present */ + unsigned short mpc_oemsize; /* 0 if not present */ + unsigned short mpc_oemcount; + unsigned int mpc_lapic; /* APIC address */ + unsigned int reserved; +}; + +/* Followed by entries */ + +#define MP_PROCESSOR 0 +#define MP_BUS 1 +#define MP_IOAPIC 2 +#define MP_INTSRC 3 +#define MP_LINTSRC 4 +#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ + +struct mpc_config_processor +{ + unsigned char mpc_type; + unsigned char mpc_apicid; /* Local APIC number */ + unsigned char mpc_apicver; /* Its versions */ + unsigned char mpc_cpuflag; +#define CPU_ENABLED 1 /* Processor is available */ +#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ + unsigned int mpc_cpufeature; +#define CPU_STEPPING_MASK 0x0F +#define CPU_MODEL_MASK 0xF0 +#define CPU_FAMILY_MASK 0xF00 + unsigned int mpc_featureflag; /* CPUID feature value */ + unsigned int mpc_reserved[2]; +}; + +struct mpc_config_bus +{ + unsigned char mpc_type; + unsigned char mpc_busid; + unsigned char mpc_bustype[6]; +}; + +/* List of Bus Type string values, Intel MP Spec. */ +#define BUSTYPE_EISA "EISA" +#define BUSTYPE_ISA "ISA" +#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ +#define BUSTYPE_MCA "MCA" +#define BUSTYPE_VL "VL" /* Local bus */ +#define BUSTYPE_PCI "PCI" +#define BUSTYPE_PCMCIA "PCMCIA" +#define BUSTYPE_CBUS "CBUS" +#define BUSTYPE_CBUSII "CBUSII" +#define BUSTYPE_FUTURE "FUTURE" +#define BUSTYPE_MBI "MBI" +#define BUSTYPE_MBII "MBII" +#define BUSTYPE_MPI "MPI" +#define BUSTYPE_MPSA "MPSA" +#define BUSTYPE_NUBUS "NUBUS" +#define BUSTYPE_TC "TC" +#define BUSTYPE_VME "VME" +#define BUSTYPE_XPRESS "XPRESS" +#define BUSTYPE_NEC98 "NEC98" + +struct mpc_config_ioapic +{ + unsigned char mpc_type; + unsigned char mpc_apicid; + unsigned char mpc_apicver; + unsigned char mpc_flags; +#define MPC_APIC_USABLE 0x01 + unsigned int mpc_apicaddr; +}; + +struct mpc_config_intsrc +{ + unsigned char mpc_type; + unsigned char mpc_irqtype; + unsigned short mpc_irqflag; + unsigned char mpc_srcbus; + unsigned char mpc_srcbusirq; + unsigned char mpc_dstapic; + unsigned char mpc_dstirq; +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3 +}; + +#define MP_IRQDIR_DEFAULT 0 +#define MP_IRQDIR_HIGH 1 +#define MP_IRQDIR_LOW 3 + + +struct mpc_config_lintsrc +{ + unsigned char mpc_type; + unsigned char mpc_irqtype; + unsigned short mpc_irqflag; + unsigned char mpc_srcbusid; + unsigned char mpc_srcbusirq; + unsigned char mpc_destapic; +#define MP_APIC_ALL 0xFF + unsigned char mpc_destapiclint; +}; + +struct mp_config_oemtable +{ + char oem_signature[4]; +#define MPC_OEM_SIGNATURE "_OEM" + unsigned short oem_length; /* Size of table */ + char oem_rev; /* 0x01 */ + char oem_checksum; + char mpc_oem[8]; +}; + +struct mpc_config_translation +{ + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; +}; + +/* + * Default configurations + * + * 1 2 CPU ISA 82489DX + * 2 2 CPU EISA 
82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining + * 3 2 CPU EISA 82489DX + * 4 2 CPU MCA 82489DX + * 5 2 CPU ISA+PCI + * 6 2 CPU EISA+PCI + * 7 2 CPU MCA+PCI + */ + +enum mp_bustype { + MP_BUS_ISA = 1, + MP_BUS_EISA, + MP_BUS_PCI, + MP_BUS_MCA, + MP_BUS_NEC98 +}; +#endif + diff --git a/xen/arch/x86/include/asm/msi.h b/xen/arch/x86/include/asm/msi.h new file mode 100644 index 0000000000..e228b0f3f3 --- /dev/null +++ b/xen/arch/x86/include/asm/msi.h @@ -0,0 +1,256 @@ +#ifndef __ASM_MSI_H +#define __ASM_MSI_H + +#include +#include +#include +#include + +/* + * Constants for Intel APIC based MSI messages. + */ + +/* + * Shifts for MSI data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR_MASK 0x000000ff +#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) + +#define MSI_DATA_DELIVERY_MODE_SHIFT 8 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) +#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) +#define MSI_DATA_DELIVERY_MODE_MASK 0x00000700 + +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) +#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) + +#define MSI_DATA_TRIGGER_SHIFT 15 +#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_MASK 0x00008000 + +/* + * Shift/mask fields for msi address + */ + +#define MSI_ADDR_BASE_HI 0 +#define MSI_ADDR_BASE_LO 0xfee00000 +#define MSI_ADDR_BASE_MASK (~0xfffff) +#define MSI_ADDR_HEADER MSI_ADDR_BASE_LO + +#define MSI_ADDR_DESTMODE_SHIFT 2 +#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) +#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) +#define MSI_ADDR_DESTMODE_MASK 0x4 + +#define MSI_ADDR_REDIRECTION_SHIFT 3 +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) +#define MSI_ADDR_REDIRECTION_MASK (1 << MSI_ADDR_REDIRECTION_SHIFT) + +#define MSI_ADDR_DEST_ID_SHIFT 12 +#define MSI_ADDR_DEST_ID_MASK 0x00ff000 +#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) + +/* MAX fixed pages reserved for mapping MSIX tables. 
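+ * (512 pages of 4k each, i.e. up to 2MB of MSI-X table space mappable
+ * at any one time.)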
*/ +#define FIX_MSIX_MAX_PAGES 512 + +struct msi_info { + u16 seg; + u8 bus; + u8 devfn; + int irq; + int entry_nr; + uint64_t table_base; +}; + +struct msi_msg { + union { + u64 address; /* message address */ + struct { + u32 address_lo; /* message address low 32 bits */ + u32 address_hi; /* message address high 32 bits */ + }; + }; + u32 data; /* 16 bits of msi message data */ + u32 dest32; /* used when Interrupt Remapping with EIM is enabled */ +}; + +struct irq_desc; +struct hw_interrupt_type; +struct msi_desc; +/* Helper functions */ +extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc); +extern void pci_disable_msi(struct msi_desc *desc); +extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off); +extern void pci_cleanup_msi(struct pci_dev *pdev); +extern int setup_msi_irq(struct irq_desc *, struct msi_desc *); +extern int __setup_msi_irq(struct irq_desc *, struct msi_desc *, + const struct hw_interrupt_type *); +extern void teardown_msi_irq(int irq); +extern int msi_free_vector(struct msi_desc *entry); +extern int pci_restore_msi_state(struct pci_dev *pdev); +extern int pci_reset_msix_state(struct pci_dev *pdev); + +struct msi_desc { + struct msi_attrib { + __u8 type; /* {0: unused, 5h:MSI, 11h:MSI-X} */ + __u8 pos; /* Location of the MSI capability */ + __u8 maskbit : 1; /* mask/pending bit supported ? */ + __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ + __u8 host_masked : 1; + __u8 guest_masked : 1; + __u16 entry_nr; /* specific enabled entry */ + } msi_attrib; + + bool irte_initialized; + uint8_t gvec; /* guest vector. valid when pi_desc isn't NULL */ + const struct pi_desc *pi_desc; /* pointer to posted descriptor */ + + struct list_head list; + + union { + void __iomem *mask_base;/* va for the entry in mask table */ + struct { + unsigned int nvec;/* number of vectors */ + unsigned int mpos;/* location of mask register */ + } msi; + unsigned int hpet_id; /* HPET (dev is NULL) */ + }; + struct pci_dev *dev; + int irq; + int remap_index; /* index in interrupt remapping table */ + + struct msi_msg msg; /* Last set MSI message */ +}; + +/* + * Values stored into msi_desc.msi_attrib.pos for non-PCI devices + * (msi_desc.msi_attrib.type is zero): + */ +#define MSI_TYPE_UNKNOWN 0 +#define MSI_TYPE_HPET 1 +#define MSI_TYPE_IOMMU 2 + +int msi_maskable_irq(const struct msi_desc *); +int msi_free_irq(struct msi_desc *entry); + +/* + * Assume the maximum number of hot plug slots supported by the system is about + * ten. The worstcase is that each of these slots is hot-added with a device, + * which has two MSI/MSI-X capable functions. To avoid any MSI-X driver, which + * attempts to request all available vectors, NR_HP_RESERVED_VECTORS is defined + * as below to ensure at least one message is assigned to each detected MSI/ + * MSI-X device function. + */ +#define NR_HP_RESERVED_VECTORS 20 + +#define msi_control_reg(base) (base + PCI_MSI_FLAGS) +#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) +#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) +#define msi_data_reg(base, is64bit) \ + ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) +#define msi_mask_bits_reg(base, is64bit) \ + ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) +#define msi_pending_bits_reg(base, is64bit) \ + ((base) + PCI_MSI_MASK_BIT + ((is64bit) ? 
4 : 0)) +#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE +#define multi_msi_capable(control) \ + (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1)) +#define multi_msi_enable(control, num) \ + control |= (((fls(num) - 1) << 4) & PCI_MSI_FLAGS_QSIZE); +#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) +#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) +#define msi_enable(control, num) multi_msi_enable(control, num); \ + control |= PCI_MSI_FLAGS_ENABLE + +#define msix_control_reg(base) (base + PCI_MSIX_FLAGS) +#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) +#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) +#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE +#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE +#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) +#define msix_unmask(address) (address & ~PCI_MSIX_VECTOR_BITMASK) +#define msix_mask(address) (address | PCI_MSIX_VECTOR_BITMASK) + +/* + * MSI Defined Data Structures + */ + +struct __packed msg_data { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u32 vector : 8; + __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ + __u32 reserved_1 : 3; + __u32 level : 1; /* 0: deassert | 1: assert */ + __u32 trigger : 1; /* 0: edge | 1: level */ + __u32 reserved_2 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u32 reserved_2 : 16; + __u32 trigger : 1; /* 0: edge | 1: level */ + __u32 level : 1; /* 0: deassert | 1: assert */ + __u32 reserved_1 : 3; + __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ + __u32 vector : 8; +#else +#error "Bitfield endianness not defined! Check your byteorder.h" +#endif +}; + +struct __packed msg_address { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u32 reserved_1 : 2; + __u32 dest_mode : 1; /*0:physic | 1:logic */ + __u32 redirection_hint: 1; /*0: dedicated CPU + 1: lowest priority */ + __u32 reserved_2 : 4; + __u32 dest_id : 24; /* Destination ID */ +#elif defined(__BIG_ENDIAN_BITFIELD) + __u32 dest_id : 24; /* Destination ID */ + __u32 reserved_2 : 4; + __u32 redirection_hint: 1; /*0: dedicated CPU + 1: lowest priority */ + __u32 dest_mode : 1; /*0:physic | 1:logic */ + __u32 reserved_1 : 2; +#else +#error "Bitfield endianness not defined! 
Check your byteorder.h" +#endif + }u; + __u32 value; + }lo_address; + __u32 hi_address; +}; + +#define MAX_MSIX_TABLE_ENTRIES (PCI_MSIX_FLAGS_QSIZE + 1) +#define MAX_MSIX_TABLE_PAGES PFN_UP(MAX_MSIX_TABLE_ENTRIES * \ + PCI_MSIX_ENTRY_SIZE + \ + (~PCI_MSIX_BIRMASK & (PAGE_SIZE - 1))) + +struct arch_msix { + unsigned int nr_entries, used_entries; + struct { + unsigned long first, last; + } table, pba; + int table_refcnt[MAX_MSIX_TABLE_PAGES]; + int table_idx[MAX_MSIX_TABLE_PAGES]; + spinlock_t table_lock; + bool host_maskall, guest_maskall; + domid_t warned; +}; + +void early_msi_init(void); +void msi_compose_msg(unsigned vector, const cpumask_t *mask, + struct msi_msg *msg); +void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable); +void mask_msi_irq(struct irq_desc *); +void unmask_msi_irq(struct irq_desc *); +void guest_mask_msi_irq(struct irq_desc *, bool mask); +void ack_nonmaskable_msi_irq(struct irq_desc *); +void set_msi_affinity(struct irq_desc *, const cpumask_t *); + +#endif /* __ASM_MSI_H */ diff --git a/xen/arch/x86/include/asm/msr-index.h b/xen/arch/x86/include/asm/msr-index.h new file mode 100644 index 0000000000..ab68ef2681 --- /dev/null +++ b/xen/arch/x86/include/asm/msr-index.h @@ -0,0 +1,671 @@ +#ifndef __ASM_MSR_INDEX_H +#define __ASM_MSR_INDEX_H + +/* + * CPU model specific register (MSR) numbers + * + * Definitions for an MSR should follow this style: + * + * #define MSR_$NAME 0x$INDEX + * #define $NAME_$FIELD1 (_AC($X, ULL) << $POS1) + * #define $NAME_$FIELD2 (_AC($Y, ULL) << $POS2) + * + * Blocks of related constants should be sorted by MSR index. The constant + * names should be as concise as possible, and the bit names may have an + * abbreviated name. Exceptions will be considered on a case-by-case basis. 
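+ *
+ * As a concrete instance of the pattern, the first block below reads:
+ *
+ *     #define MSR_APIC_BASE            0x0000001b
+ *     #define APIC_BASE_BSP            (_AC(1, ULL) <<  8)
+ *     ...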
+ */ + +#define MSR_APIC_BASE 0x0000001b +#define APIC_BASE_BSP (_AC(1, ULL) << 8) +#define APIC_BASE_EXTD (_AC(1, ULL) << 10) +#define APIC_BASE_ENABLE (_AC(1, ULL) << 11) +#define APIC_BASE_ADDR_MASK 0x000ffffffffff000ULL + +#define MSR_TEST_CTRL 0x00000033 +#define TEST_CTRL_SPLITLOCK_DETECT (_AC(1, ULL) << 29) +#define TEST_CTRL_SPLITLOCK_DISABLE (_AC(1, ULL) << 31) + +#define MSR_INTEL_CORE_THREAD_COUNT 0x00000035 +#define MSR_CTC_THREAD_MASK 0x0000ffff +#define MSR_CTC_CORE_MASK 0xffff0000 + +#define MSR_SPEC_CTRL 0x00000048 +#define SPEC_CTRL_IBRS (_AC(1, ULL) << 0) +#define SPEC_CTRL_STIBP (_AC(1, ULL) << 1) +#define SPEC_CTRL_SSBD (_AC(1, ULL) << 2) +#define SPEC_CTRL_PSFD (_AC(1, ULL) << 7) + +#define MSR_PRED_CMD 0x00000049 +#define PRED_CMD_IBPB (_AC(1, ULL) << 0) + +#define MSR_PPIN_CTL 0x0000004e +#define PPIN_LOCKOUT (_AC(1, ULL) << 0) +#define PPIN_ENABLE (_AC(1, ULL) << 1) +#define MSR_PPIN 0x0000004f + +#define MSR_CORE_CAPABILITIES 0x000000cf +#define CORE_CAPS_SPLITLOCK_DETECT (_AC(1, ULL) << 5) + +#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 +#define NHM_C3_AUTO_DEMOTE (_AC(1, ULL) << 25) +#define NHM_C1_AUTO_DEMOTE (_AC(1, ULL) << 26) +#define ATM_LNC_C6_AUTO_DEMOTE (_AC(1, ULL) << 25) +#define SNB_C3_AUTO_UNDEMOTE (_AC(1, ULL) << 27) +#define SNB_C1_AUTO_UNDEMOTE (_AC(1, ULL) << 28) + +#define MSR_ARCH_CAPABILITIES 0x0000010a +#define ARCH_CAPS_RDCL_NO (_AC(1, ULL) << 0) +#define ARCH_CAPS_IBRS_ALL (_AC(1, ULL) << 1) +#define ARCH_CAPS_RSBA (_AC(1, ULL) << 2) +#define ARCH_CAPS_SKIP_L1DFL (_AC(1, ULL) << 3) +#define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4) +#define ARCH_CAPS_MDS_NO (_AC(1, ULL) << 5) +#define ARCH_CAPS_IF_PSCHANGE_MC_NO (_AC(1, ULL) << 6) +#define ARCH_CAPS_TSX_CTRL (_AC(1, ULL) << 7) +#define ARCH_CAPS_TAA_NO (_AC(1, ULL) << 8) + +#define MSR_FLUSH_CMD 0x0000010b +#define FLUSH_CMD_L1D (_AC(1, ULL) << 0) + +#define MSR_TSX_FORCE_ABORT 0x0000010f +#define TSX_FORCE_ABORT_RTM (_AC(1, ULL) << 0) +#define TSX_CPUID_CLEAR (_AC(1, ULL) << 1) +#define TSX_ENABLE_RTM (_AC(1, ULL) << 2) + +#define MSR_TSX_CTRL 0x00000122 +#define TSX_CTRL_RTM_DISABLE (_AC(1, ULL) << 0) +#define TSX_CTRL_CPUID_CLEAR (_AC(1, ULL) << 1) + +#define MSR_MCU_OPT_CTRL 0x00000123 +#define MCU_OPT_CTRL_RNGDS_MITG_DIS (_AC(1, ULL) << 0) + +#define MSR_RTIT_OUTPUT_BASE 0x00000560 +#define MSR_RTIT_OUTPUT_MASK 0x00000561 +#define MSR_RTIT_CTL 0x00000570 +#define RTIT_CTL_TRACE_EN (_AC(1, ULL) << 0) +#define RTIT_CTL_CYC_EN (_AC(1, ULL) << 1) +#define RTIT_CTL_OS (_AC(1, ULL) << 2) +#define RTIT_CTL_USR (_AC(1, ULL) << 3) +#define RTIT_CTL_PWR_EVT_EN (_AC(1, ULL) << 4) +#define RTIT_CTL_FUP_ON_PTW (_AC(1, ULL) << 5) +#define RTIT_CTL_FABRIC_EN (_AC(1, ULL) << 6) +#define RTIT_CTL_CR3_FILTER (_AC(1, ULL) << 7) +#define RTIT_CTL_TOPA (_AC(1, ULL) << 8) +#define RTIT_CTL_MTC_EN (_AC(1, ULL) << 9) +#define RTIT_CTL_TSC_EN (_AC(1, ULL) << 10) +#define RTIT_CTL_DIS_RETC (_AC(1, ULL) << 11) +#define RTIT_CTL_PTW_EN (_AC(1, ULL) << 12) +#define RTIT_CTL_BRANCH_EN (_AC(1, ULL) << 13) +#define RTIT_CTL_MTC_FREQ (_AC(0xf, ULL) << 14) +#define RTIT_CTL_CYC_THRESH (_AC(0xf, ULL) << 19) +#define RTIT_CTL_PSB_FREQ (_AC(0xf, ULL) << 24) +#define RTIT_CTL_ADDR(n) (_AC(0xf, ULL) << (32 + 4 * (n))) +#define MSR_RTIT_STATUS 0x00000571 +#define RTIT_STATUS_FILTER_EN (_AC(1, ULL) << 0) +#define RTIT_STATUS_CONTEXT_EN (_AC(1, ULL) << 1) +#define RTIT_STATUS_TRIGGER_EN (_AC(1, ULL) << 2) +#define RTIT_STATUS_ERROR (_AC(1, ULL) << 4) +#define RTIT_STATUS_STOPPED (_AC(1, ULL) << 5) +#define RTIT_STATUS_BYTECNT 
(_AC(0x1ffff, ULL) << 32) +#define MSR_RTIT_CR3_MATCH 0x00000572 +#define MSR_RTIT_ADDR_A(n) (0x00000580 + (n) * 2) +#define MSR_RTIT_ADDR_B(n) (0x00000581 + (n) * 2) + +#define MSR_U_CET 0x000006a0 +#define MSR_S_CET 0x000006a2 +#define CET_SHSTK_EN (_AC(1, ULL) << 0) +#define CET_WRSS_EN (_AC(1, ULL) << 1) + +#define MSR_PL0_SSP 0x000006a4 +#define MSR_PL1_SSP 0x000006a5 +#define MSR_PL2_SSP 0x000006a6 +#define MSR_PL3_SSP 0x000006a7 +#define MSR_INTERRUPT_SSP_TABLE 0x000006a8 + +#define MSR_X2APIC_FIRST 0x00000800 +#define MSR_X2APIC_LAST 0x00000bff + +#define MSR_X2APIC_TPR 0x00000808 +#define MSR_X2APIC_PPR 0x0000080a +#define MSR_X2APIC_EOI 0x0000080b +#define MSR_X2APIC_TMICT 0x00000838 +#define MSR_X2APIC_TMCCT 0x00000839 +#define MSR_X2APIC_SELF 0x0000083f + +#define MSR_PASID 0x00000d93 +#define PASID_PASID_MASK 0x000fffff +#define PASID_VALID (_AC(1, ULL) << 31) + +#define MSR_EFER 0xc0000080 /* Extended Feature Enable Register */ +#define EFER_SCE (_AC(1, ULL) << 0) /* SYSCALL Enable */ +#define EFER_LME (_AC(1, ULL) << 8) /* Long Mode Enable */ +#define EFER_LMA (_AC(1, ULL) << 10) /* Long Mode Active */ +#define EFER_NXE (_AC(1, ULL) << 11) /* No Execute Enable */ +#define EFER_SVME (_AC(1, ULL) << 12) /* Secure Virtual Machine Enable */ +#define EFER_FFXSE (_AC(1, ULL) << 14) /* Fast FXSAVE/FXRSTOR */ + +#define EFER_KNOWN_MASK \ + (EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE | EFER_SVME | EFER_FFXSE) + +#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ +#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ +#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ +#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ +#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ +#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ +#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */ +#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ + +#define MSR_K8_SYSCFG 0xc0010010 +#define SYSCFG_MTRR_FIX_DRAM_EN (_AC(1, ULL) << 18) +#define SYSCFG_MTRR_FIX_DRAM_MOD_EN (_AC(1, ULL) << 19) +#define SYSCFG_MTRR_VAR_DRAM_EN (_AC(1, ULL) << 20) +#define SYSCFG_MTRR_TOM2_EN (_AC(1, ULL) << 21) +#define SYSCFG_TOM2_FORCE_WB (_AC(1, ULL) << 22) + +#define MSR_K8_IORR_BASE0 0xc0010016 +#define MSR_K8_IORR_MASK0 0xc0010017 +#define MSR_K8_IORR_BASE1 0xc0010018 +#define MSR_K8_IORR_MASK1 0xc0010019 + +#define MSR_K8_TSEG_BASE 0xc0010112 /* AMD doc: SMMAddr */ +#define MSR_K8_TSEG_MASK 0xc0010113 /* AMD doc: SMMMask */ + +#define MSR_K8_VM_CR 0xc0010114 +#define VM_CR_INIT_REDIRECTION (_AC(1, ULL) << 1) +#define VM_CR_SVM_DISABLE (_AC(1, ULL) << 4) + +#define MSR_VIRT_SPEC_CTRL 0xc001011f /* Layout matches MSR_SPEC_CTRL */ + +/* + * Legacy MSR constants in need of cleanup. No new MSRs below this comment. + */ + +/* Intel MSRs. 
Some also available on other CPUs */ +#define MSR_IA32_PERFCTR0 0x000000c1 +#define MSR_IA32_A_PERFCTR0 0x000004c1 +#define MSR_FSB_FREQ 0x000000cd + +#define MSR_MTRRcap 0x000000fe +#define MTRRcap_VCNT 0x000000ff + +#define MSR_IA32_BBL_CR_CTL 0x00000119 + +#define MSR_IA32_SYSENTER_CS 0x00000174 +#define MSR_IA32_SYSENTER_ESP 0x00000175 +#define MSR_IA32_SYSENTER_EIP 0x00000176 + +#define MSR_IA32_MCG_CAP 0x00000179 +#define MSR_IA32_MCG_STATUS 0x0000017a +#define MSR_IA32_MCG_CTL 0x0000017b +#define MSR_IA32_MCG_EXT_CTL 0x000004d0 + +#define MSR_IA32_PEBS_ENABLE 0x000003f1 +#define MSR_IA32_DS_AREA 0x00000600 +#define MSR_IA32_PERF_CAPABILITIES 0x00000345 +/* Lower 6 bits define the format of the address in the LBR stack */ +#define MSR_IA32_PERF_CAP_LBR_FORMAT 0x3f + +#define MSR_IA32_BNDCFGS 0x00000d90 +#define IA32_BNDCFGS_ENABLE 0x00000001 +#define IA32_BNDCFGS_PRESERVE 0x00000002 +#define IA32_BNDCFGS_RESERVED 0x00000ffc + +#define MSR_IA32_XSS 0x00000da0 + +#define MSR_MTRRfix64K_00000 0x00000250 +#define MSR_MTRRfix16K_80000 0x00000258 +#define MSR_MTRRfix16K_A0000 0x00000259 +#define MSR_MTRRfix4K_C0000 0x00000268 +#define MSR_MTRRfix4K_C8000 0x00000269 +#define MSR_MTRRfix4K_D0000 0x0000026a +#define MSR_MTRRfix4K_D8000 0x0000026b +#define MSR_MTRRfix4K_E0000 0x0000026c +#define MSR_MTRRfix4K_E8000 0x0000026d +#define MSR_MTRRfix4K_F0000 0x0000026e +#define MSR_MTRRfix4K_F8000 0x0000026f +#define MSR_MTRRdefType 0x000002ff +#define MTRRdefType_FE (1u << 10) +#define MTRRdefType_E (1u << 11) + +#define MSR_IA32_DEBUGCTLMSR 0x000001d9 +#define IA32_DEBUGCTLMSR_LBR (1<<0) /* Last Branch Record */ +#define IA32_DEBUGCTLMSR_BTF (1<<1) /* Single Step on Branches */ +#define IA32_DEBUGCTLMSR_TR (1<<6) /* Trace Message Enable */ +#define IA32_DEBUGCTLMSR_BTS (1<<7) /* Branch Trace Store */ +#define IA32_DEBUGCTLMSR_BTINT (1<<8) /* Branch Trace Interrupt */ +#define IA32_DEBUGCTLMSR_BTS_OFF_OS (1<<9) /* BTS off if CPL 0 */ +#define IA32_DEBUGCTLMSR_BTS_OFF_USR (1<<10) /* BTS off if CPL > 0 */ +#define IA32_DEBUGCTLMSR_RTM (1<<15) /* RTM debugging enable */ + +#define MSR_IA32_LASTBRANCHFROMIP 0x000001db +#define MSR_IA32_LASTBRANCHTOIP 0x000001dc +#define MSR_IA32_LASTINTFROMIP 0x000001dd +#define MSR_IA32_LASTINTTOIP 0x000001de + +#define MSR_IA32_POWER_CTL 0x000001fc + +#define MSR_IA32_MTRR_PHYSBASE(n) (0x00000200 + 2 * (n)) +#define MSR_IA32_MTRR_PHYSMASK(n) (0x00000201 + 2 * (n)) + +#define MSR_IA32_CR_PAT 0x00000277 +#define MSR_IA32_CR_PAT_RESET 0x0007040600070406ULL + +#define MSR_IA32_MC0_CTL 0x00000400 +#define MSR_IA32_MC0_STATUS 0x00000401 +#define MSR_IA32_MC0_ADDR 0x00000402 +#define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_IA32_MC0_CTL2 0x00000280 +#define CMCI_EN (1UL<<30) +#define CMCI_THRESHOLD_MASK 0x7FFF + +#define MSR_AMD64_MC0_MASK 0xc0010044 + +#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) +#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) +#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) +#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) +#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) + +#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) + +/* MSRs & bits used for VMX enabling */ +#define MSR_IA32_VMX_BASIC 0x480 +#define MSR_IA32_VMX_PINBASED_CTLS 0x481 +#define MSR_IA32_VMX_PROCBASED_CTLS 0x482 +#define MSR_IA32_VMX_EXIT_CTLS 0x483 +#define MSR_IA32_VMX_ENTRY_CTLS 0x484 +#define MSR_IA32_VMX_MISC 0x485 +#define MSR_IA32_VMX_CR0_FIXED0 0x486 +#define MSR_IA32_VMX_CR0_FIXED1 0x487 +#define MSR_IA32_VMX_CR4_FIXED0 
0x488 +#define MSR_IA32_VMX_CR4_FIXED1 0x489 +#define MSR_IA32_VMX_VMCS_ENUM 0x48a +#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b +#define MSR_IA32_VMX_EPT_VPID_CAP 0x48c +#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x48d +#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x48e +#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x48f +#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x490 +#define MSR_IA32_VMX_VMFUNC 0x491 + +/* K7/K8 MSRs. Not complete. See the architecture manual for a more + complete list. */ +#define MSR_K7_EVNTSEL0 0xc0010000 +#define MSR_K7_PERFCTR0 0xc0010004 +#define MSR_K7_EVNTSEL1 0xc0010001 +#define MSR_K7_PERFCTR1 0xc0010005 +#define MSR_K7_EVNTSEL2 0xc0010002 +#define MSR_K7_PERFCTR2 0xc0010006 +#define MSR_K7_EVNTSEL3 0xc0010003 +#define MSR_K7_PERFCTR3 0xc0010007 +#define MSR_K8_TOP_MEM1 0xc001001a +#define MSR_K7_CLK_CTL 0xc001001b +#define MSR_K8_TOP_MEM2 0xc001001d + +#define MSR_K8_HWCR 0xc0010015 +#define K8_HWCR_TSC_FREQ_SEL (1ULL << 24) + +#define MSR_K7_FID_VID_CTL 0xc0010041 +#define MSR_K7_FID_VID_STATUS 0xc0010042 +#define MSR_K8_PSTATE_LIMIT 0xc0010061 +#define MSR_K8_PSTATE_CTRL 0xc0010062 +#define MSR_K8_PSTATE_STATUS 0xc0010063 +#define MSR_K8_PSTATE0 0xc0010064 +#define MSR_K8_PSTATE1 0xc0010065 +#define MSR_K8_PSTATE2 0xc0010066 +#define MSR_K8_PSTATE3 0xc0010067 +#define MSR_K8_PSTATE4 0xc0010068 +#define MSR_K8_PSTATE5 0xc0010069 +#define MSR_K8_PSTATE6 0xc001006A +#define MSR_K8_PSTATE7 0xc001006B +#define MSR_K8_ENABLE_C1E 0xc0010055 +#define MSR_K8_VM_HSAVE_PA 0xc0010117 + +#define MSR_AMD_FAM15H_EVNTSEL0 0xc0010200 +#define MSR_AMD_FAM15H_PERFCTR0 0xc0010201 +#define MSR_AMD_FAM15H_EVNTSEL1 0xc0010202 +#define MSR_AMD_FAM15H_PERFCTR1 0xc0010203 +#define MSR_AMD_FAM15H_EVNTSEL2 0xc0010204 +#define MSR_AMD_FAM15H_PERFCTR2 0xc0010205 +#define MSR_AMD_FAM15H_EVNTSEL3 0xc0010206 +#define MSR_AMD_FAM15H_PERFCTR3 0xc0010207 +#define MSR_AMD_FAM15H_EVNTSEL4 0xc0010208 +#define MSR_AMD_FAM15H_PERFCTR4 0xc0010209 +#define MSR_AMD_FAM15H_EVNTSEL5 0xc001020a +#define MSR_AMD_FAM15H_PERFCTR5 0xc001020b + +#define MSR_AMD_L7S0_FEATURE_MASK 0xc0011002 +#define MSR_AMD_THRM_FEATURE_MASK 0xc0011003 +#define MSR_K8_FEATURE_MASK 0xc0011004 +#define MSR_K8_EXT_FEATURE_MASK 0xc0011005 + +/* AMD64 MSRs */ +#define MSR_AMD64_NB_CFG 0xc001001f +#define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46 +#define MSR_AMD64_LS_CFG 0xc0011020 +#define MSR_AMD64_IC_CFG 0xc0011021 +#define MSR_AMD64_DC_CFG 0xc0011022 +#define MSR_AMD64_DE_CFG 0xc0011029 +#define AMD64_DE_CFG_LFENCE_SERIALISE (_AC(1, ULL) << 1) +#define MSR_AMD64_EX_CFG 0xc001102c + +#define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027 +#define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019 +#define MSR_AMD64_DR2_ADDRESS_MASK 0xc001101a +#define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b + +/* AMD Family10h machine check MSRs */ +#define MSR_F10_MC4_MISC1 0xc0000408 +#define MSR_F10_MC4_MISC2 0xc0000409 +#define MSR_F10_MC4_MISC3 0xc000040A + +/* AMD Family10h Bus Unit MSRs */ +#define MSR_F10_BU_CFG 0xc0011023 +#define MSR_F10_BU_CFG2 0xc001102a + +/* Other AMD Fam10h MSRs */ +#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 +#define FAM10H_MMIO_CONF_ENABLE (1<<0) +#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf +#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 +#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL +#define FAM10H_MMIO_CONF_BASE_SHIFT 20 + +/* AMD Microcode MSRs */ +#define MSR_AMD_PATCHLEVEL 0x0000008b +#define MSR_AMD_PATCHLOADER 0xc0010020 + +/* AMD TSC RATE MSR */ +#define MSR_AMD64_TSC_RATIO 0xc0000104 + +/* AMD Lightweight Profiling MSRs */ +#define MSR_AMD64_LWP_CFG 
0xc0000105 +#define MSR_AMD64_LWP_CBADDR 0xc0000106 + +/* AMD OS Visible Workaround MSRs */ +#define MSR_AMD_OSVW_ID_LENGTH 0xc0010140 +#define MSR_AMD_OSVW_STATUS 0xc0010141 + +/* AMD Protected Processor Inventory Number */ +#define MSR_AMD_PPIN_CTL 0xc00102f0 +#define MSR_AMD_PPIN 0xc00102f1 + +/* K6 MSRs */ +#define MSR_K6_EFER 0xc0000080 +#define MSR_K6_STAR 0xc0000081 +#define MSR_K6_WHCR 0xc0000082 +#define MSR_K6_UWCCR 0xc0000085 +#define MSR_K6_EPMR 0xc0000086 +#define MSR_K6_PSOR 0xc0000087 +#define MSR_K6_PFIR 0xc0000088 + +/* Centaur-Hauls/IDT defined MSRs. */ +#define MSR_IDT_FCR1 0x00000107 +#define MSR_IDT_FCR2 0x00000108 +#define MSR_IDT_FCR3 0x00000109 +#define MSR_IDT_FCR4 0x0000010a + +#define MSR_IDT_MCR0 0x00000110 +#define MSR_IDT_MCR1 0x00000111 +#define MSR_IDT_MCR2 0x00000112 +#define MSR_IDT_MCR3 0x00000113 +#define MSR_IDT_MCR4 0x00000114 +#define MSR_IDT_MCR5 0x00000115 +#define MSR_IDT_MCR6 0x00000116 +#define MSR_IDT_MCR7 0x00000117 +#define MSR_IDT_MCR_CTRL 0x00000120 + +/* VIA Cyrix defined MSRs*/ +#define MSR_VIA_FCR 0x00001107 +#define MSR_VIA_LONGHAUL 0x0000110a +#define MSR_VIA_RNG 0x0000110b +#define MSR_VIA_BCR2 0x00001147 + +/* Transmeta defined MSRs */ +#define MSR_TMTA_LONGRUN_CTRL 0x80868010 +#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 +#define MSR_TMTA_LRTI_READOUT 0x80868018 +#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a + +/* Intel defined MSRs. */ +#define MSR_IA32_P5_MC_ADDR 0x00000000 +#define MSR_IA32_P5_MC_TYPE 0x00000001 +#define MSR_IA32_TSC 0x00000010 +#define MSR_IA32_PLATFORM_ID 0x00000017 +#define MSR_IA32_EBL_CR_POWERON 0x0000002a +#define MSR_IA32_EBC_FREQUENCY_ID 0x0000002c + +#define MSR_IA32_FEATURE_CONTROL 0x0000003a +#define IA32_FEATURE_CONTROL_LOCK 0x0001 +#define IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX 0x0002 +#define IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX 0x0004 +#define IA32_FEATURE_CONTROL_SENTER_PARAM_CTL 0x7f00 +#define IA32_FEATURE_CONTROL_ENABLE_SENTER 0x8000 +#define IA32_FEATURE_CONTROL_SGX_ENABLE 0x40000 +#define IA32_FEATURE_CONTROL_LMCE_ON 0x100000 + +#define MSR_IA32_TSC_ADJUST 0x0000003b + +#define MSR_IA32_UCODE_WRITE 0x00000079 +#define MSR_IA32_UCODE_REV 0x0000008b + +#define MSR_IA32_PERF_STATUS 0x00000198 +#define MSR_IA32_PERF_CTL 0x00000199 + +#define MSR_IA32_MPERF 0x000000e7 +#define MSR_IA32_APERF 0x000000e8 + +#define MSR_IA32_THERM_CONTROL 0x0000019a +#define MSR_IA32_THERM_INTERRUPT 0x0000019b +#define MSR_IA32_THERM_STATUS 0x0000019c +#define MSR_IA32_MISC_ENABLE 0x000001a0 +#define MSR_IA32_MISC_ENABLE_PERF_AVAIL (1<<7) +#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1<<11) +#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12) +#define MSR_IA32_MISC_ENABLE_MONITOR_ENABLE (1<<18) +#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1<<22) +#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1<<23) +#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34) + +#define MSR_IA32_TSC_DEADLINE 0x000006E0 +#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 + +/* Platform Shared Resource MSRs */ +#define MSR_IA32_CMT_EVTSEL 0x00000c8d +#define MSR_IA32_CMT_EVTSEL_UE_MASK 0x0000ffff +#define MSR_IA32_CMT_CTR 0x00000c8e +#define MSR_IA32_PSR_ASSOC 0x00000c8f +#define MSR_IA32_PSR_L3_QOS_CFG 0x00000c81 +#define MSR_IA32_PSR_L3_MASK(n) (0x00000c90 + (n)) +#define MSR_IA32_PSR_L3_MASK_CODE(n) (0x00000c90 + (n) * 2 + 1) +#define MSR_IA32_PSR_L3_MASK_DATA(n) (0x00000c90 + (n) * 2) +#define MSR_IA32_PSR_L2_MASK(n) (0x00000d10 + (n)) +#define MSR_IA32_PSR_MBA_MASK(n) (0x00000d50 + (n)) + +/* Intel Model 6 */ +#define MSR_P6_PERFCTR(n) (0x000000c1 
+ (n)) +#define MSR_P6_EVNTSEL(n) (0x00000186 + (n)) + +/* P4/Xeon+ specific */ +#define MSR_IA32_MCG_EAX 0x00000180 +#define MSR_IA32_MCG_EBX 0x00000181 +#define MSR_IA32_MCG_ECX 0x00000182 +#define MSR_IA32_MCG_EDX 0x00000183 +#define MSR_IA32_MCG_ESI 0x00000184 +#define MSR_IA32_MCG_EDI 0x00000185 +#define MSR_IA32_MCG_EBP 0x00000186 +#define MSR_IA32_MCG_ESP 0x00000187 +#define MSR_IA32_MCG_EFLAGS 0x00000188 +#define MSR_IA32_MCG_EIP 0x00000189 +#define MSR_IA32_MCG_MISC 0x0000018a +#define MSR_IA32_MCG_R8 0x00000190 +#define MSR_IA32_MCG_R9 0x00000191 +#define MSR_IA32_MCG_R10 0x00000192 +#define MSR_IA32_MCG_R11 0x00000193 +#define MSR_IA32_MCG_R12 0x00000194 +#define MSR_IA32_MCG_R13 0x00000195 +#define MSR_IA32_MCG_R14 0x00000196 +#define MSR_IA32_MCG_R15 0x00000197 + +/* Pentium IV performance counter MSRs */ +#define MSR_P4_BPU_PERFCTR0 0x00000300 +#define MSR_P4_BPU_PERFCTR1 0x00000301 +#define MSR_P4_BPU_PERFCTR2 0x00000302 +#define MSR_P4_BPU_PERFCTR3 0x00000303 +#define MSR_P4_MS_PERFCTR0 0x00000304 +#define MSR_P4_MS_PERFCTR1 0x00000305 +#define MSR_P4_MS_PERFCTR2 0x00000306 +#define MSR_P4_MS_PERFCTR3 0x00000307 +#define MSR_P4_FLAME_PERFCTR0 0x00000308 +#define MSR_P4_FLAME_PERFCTR1 0x00000309 +#define MSR_P4_FLAME_PERFCTR2 0x0000030a +#define MSR_P4_FLAME_PERFCTR3 0x0000030b +#define MSR_P4_IQ_PERFCTR0 0x0000030c +#define MSR_P4_IQ_PERFCTR1 0x0000030d +#define MSR_P4_IQ_PERFCTR2 0x0000030e +#define MSR_P4_IQ_PERFCTR3 0x0000030f +#define MSR_P4_IQ_PERFCTR4 0x00000310 +#define MSR_P4_IQ_PERFCTR5 0x00000311 +#define MSR_P4_BPU_CCCR0 0x00000360 +#define MSR_P4_BPU_CCCR1 0x00000361 +#define MSR_P4_BPU_CCCR2 0x00000362 +#define MSR_P4_BPU_CCCR3 0x00000363 +#define MSR_P4_MS_CCCR0 0x00000364 +#define MSR_P4_MS_CCCR1 0x00000365 +#define MSR_P4_MS_CCCR2 0x00000366 +#define MSR_P4_MS_CCCR3 0x00000367 +#define MSR_P4_FLAME_CCCR0 0x00000368 +#define MSR_P4_FLAME_CCCR1 0x00000369 +#define MSR_P4_FLAME_CCCR2 0x0000036a +#define MSR_P4_FLAME_CCCR3 0x0000036b +#define MSR_P4_IQ_CCCR0 0x0000036c +#define MSR_P4_IQ_CCCR1 0x0000036d +#define MSR_P4_IQ_CCCR2 0x0000036e +#define MSR_P4_IQ_CCCR3 0x0000036f +#define MSR_P4_IQ_CCCR4 0x00000370 +#define MSR_P4_IQ_CCCR5 0x00000371 +#define MSR_P4_ALF_ESCR0 0x000003ca +#define MSR_P4_ALF_ESCR1 0x000003cb +#define MSR_P4_BPU_ESCR0 0x000003b2 +#define MSR_P4_BPU_ESCR1 0x000003b3 +#define MSR_P4_BSU_ESCR0 0x000003a0 +#define MSR_P4_BSU_ESCR1 0x000003a1 +#define MSR_P4_CRU_ESCR0 0x000003b8 +#define MSR_P4_CRU_ESCR1 0x000003b9 +#define MSR_P4_CRU_ESCR2 0x000003cc +#define MSR_P4_CRU_ESCR3 0x000003cd +#define MSR_P4_CRU_ESCR4 0x000003e0 +#define MSR_P4_CRU_ESCR5 0x000003e1 +#define MSR_P4_DAC_ESCR0 0x000003a8 +#define MSR_P4_DAC_ESCR1 0x000003a9 +#define MSR_P4_FIRM_ESCR0 0x000003a4 +#define MSR_P4_FIRM_ESCR1 0x000003a5 +#define MSR_P4_FLAME_ESCR0 0x000003a6 +#define MSR_P4_FLAME_ESCR1 0x000003a7 +#define MSR_P4_FSB_ESCR0 0x000003a2 +#define MSR_P4_FSB_ESCR1 0x000003a3 +#define MSR_P4_IQ_ESCR0 0x000003ba +#define MSR_P4_IQ_ESCR1 0x000003bb +#define MSR_P4_IS_ESCR0 0x000003b4 +#define MSR_P4_IS_ESCR1 0x000003b5 +#define MSR_P4_ITLB_ESCR0 0x000003b6 +#define MSR_P4_ITLB_ESCR1 0x000003b7 +#define MSR_P4_IX_ESCR0 0x000003c8 +#define MSR_P4_IX_ESCR1 0x000003c9 +#define MSR_P4_MOB_ESCR0 0x000003aa +#define MSR_P4_MOB_ESCR1 0x000003ab +#define MSR_P4_MS_ESCR0 0x000003c0 +#define MSR_P4_MS_ESCR1 0x000003c1 +#define MSR_P4_PMH_ESCR0 0x000003ac +#define MSR_P4_PMH_ESCR1 0x000003ad +#define MSR_P4_RAT_ESCR0 0x000003bc +#define MSR_P4_RAT_ESCR1 0x000003bd +#define 
MSR_P4_SAAT_ESCR0 0x000003ae
+#define MSR_P4_SAAT_ESCR1 0x000003af
+#define MSR_P4_SSU_ESCR0 0x000003be
+#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0 0x000003c2
+#define MSR_P4_TBPU_ESCR1 0x000003c3
+#define MSR_P4_TC_ESCR0 0x000003c4
+#define MSR_P4_TC_ESCR1 0x000003c5
+#define MSR_P4_U2L_ESCR0 0x000003b0
+#define MSR_P4_U2L_ESCR1 0x000003b1
+
+/* Netburst (P4) last-branch recording */
+#define MSR_P4_LER_FROM_LIP 0x000001d7
+#define MSR_P4_LER_TO_LIP 0x000001d8
+#define MSR_P4_LASTBRANCH_TOS 0x000001da
+#define MSR_P4_LASTBRANCH_0 0x000001db
+#define NUM_MSR_P4_LASTBRANCH 4
+#define MSR_P4_LASTBRANCH_0_FROM_LIP 0x00000680
+#define MSR_P4_LASTBRANCH_0_TO_LIP 0x000006c0
+#define NUM_MSR_P4_LASTBRANCH_FROM_TO 16
+
+/* Core 2 and Atom last-branch recording */
+#define MSR_C2_LASTBRANCH_TOS 0x000001c9
+#define MSR_C2_LASTBRANCH_0_FROM_IP 0x00000040
+#define MSR_C2_LASTBRANCH_0_TO_IP 0x00000060
+#define NUM_MSR_C2_LASTBRANCH_FROM_TO 4
+#define NUM_MSR_ATOM_LASTBRANCH_FROM_TO 8
+
+/* Nehalem (and newer) last-branch recording */
+#define MSR_NHL_LBR_SELECT 0x000001c8
+#define MSR_NHL_LASTBRANCH_TOS 0x000001c9
+
+/* Skylake (and newer) last-branch recording */
+#define MSR_SKL_LASTBRANCH_0_FROM_IP 0x00000680
+#define MSR_SKL_LASTBRANCH_0_TO_IP 0x000006c0
+#define MSR_SKL_LASTBRANCH_0_INFO 0x00000dc0
+#define NUM_MSR_SKL_LASTBRANCH 32
+
+/* Silvermont (and newer) last-branch recording */
+#define MSR_SM_LBR_SELECT 0x000001c8
+#define MSR_SM_LASTBRANCH_TOS 0x000001c9
+
+/* Goldmont last-branch recording */
+#define MSR_GM_LASTBRANCH_0_FROM_IP 0x00000680
+#define MSR_GM_LASTBRANCH_0_TO_IP 0x000006c0
+#define NUM_MSR_GM_LASTBRANCH_FROM_TO 32
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+
+/* Intel cpuid spoofing MSRs */
+#define MSR_INTEL_MASK_V1_CPUID1 0x00000478
+
+#define MSR_INTEL_MASK_V2_CPUID1 0x00000130
+#define MSR_INTEL_MASK_V2_CPUID80000001 0x00000131
+
+#define MSR_INTEL_MASK_V3_CPUID1 0x00000132
+#define MSR_INTEL_MASK_V3_CPUID80000001 0x00000133
+#define MSR_INTEL_MASK_V3_CPUIDD_01 0x00000134
+
+/* Intel cpuid faulting MSRs */
+#define MSR_INTEL_PLATFORM_INFO 0x000000ce
+#define _MSR_PLATFORM_INFO_CPUID_FAULTING 31
+#define MSR_PLATFORM_INFO_CPUID_FAULTING (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)
+
+#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
+#define _MSR_MISC_FEATURES_CPUID_FAULTING 0
+#define MSR_MISC_FEATURES_CPUID_FAULTING (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
+
+#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
+#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
+
+/* Interrupt Response Limit */
+#define MSR_PKGC3_IRTL 0x0000060a
+#define MSR_PKGC6_IRTL 0x0000060b
+#define MSR_PKGC7_IRTL 0x0000060c
+#define MSR_PKGC8_IRTL 0x00000633
+#define MSR_PKGC9_IRTL 0x00000634
+#define MSR_PKGC10_IRTL 0x00000635
+
+#endif /* __ASM_MSR_INDEX_H */
diff --git a/xen/arch/x86/include/asm/msr.h b/xen/arch/x86/include/asm/msr.h
new file mode 100644
index 0000000000..1d3eca9063
--- /dev/null
+++ b/xen/arch/x86/include/asm/msr.h
@@ -0,0 +1,381 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+#include "msr-index.h"
+
+#include <xen/types.h>
+#include <xen/percpu.h>
+#include <xen/errno.h>
+
+#include <xen/lib/x86/msr.h>
+
+#include <asm/asm_defns.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+
+#define
rdmsr(msr,val1,val2) \ + __asm__ __volatile__("rdmsr" \ + : "=a" (val1), "=d" (val2) \ + : "c" (msr)) + +#define rdmsrl(msr,val) do { unsigned long a__,b__; \ + __asm__ __volatile__("rdmsr" \ + : "=a" (a__), "=d" (b__) \ + : "c" (msr)); \ + val = a__ | ((u64)b__<<32); \ +} while(0) + +#define wrmsr(msr,val1,val2) \ + __asm__ __volatile__("wrmsr" \ + : /* no outputs */ \ + : "c" (msr), "a" (val1), "d" (val2)) + +static inline void wrmsrl(unsigned int msr, __u64 val) +{ + __u32 lo, hi; + lo = (__u32)val; + hi = (__u32)(val >> 32); + wrmsr(msr, lo, hi); +} + +/* rdmsr with exception handling */ +#define rdmsr_safe(msr,val) ({\ + int rc_; \ + uint32_t lo_, hi_; \ + __asm__ __volatile__( \ + "1: rdmsr\n2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: xorl %0,%0\n; xorl %1,%1\n" \ + " movl %5,%2\n; jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=a" (lo_), "=d" (hi_), "=&r" (rc_) \ + : "c" (msr), "2" (0), "i" (-EFAULT)); \ + val = lo_ | ((uint64_t)hi_ << 32); \ + rc_; }) + +/* wrmsr with exception handling */ +static inline int wrmsr_safe(unsigned int msr, uint64_t val) +{ + int rc; + uint32_t lo, hi; + lo = (uint32_t)val; + hi = (uint32_t)(val >> 32); + + __asm__ __volatile__( + "1: wrmsr\n2:\n" + ".section .fixup,\"ax\"\n" + "3: movl %5,%0\n; jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) + : "=&r" (rc) + : "c" (msr), "a" (lo), "d" (hi), "0" (0), "i" (-EFAULT)); + return rc; +} + +static inline uint64_t msr_fold(const struct cpu_user_regs *regs) +{ + return (regs->rdx << 32) | regs->eax; +} + +static inline void msr_split(struct cpu_user_regs *regs, uint64_t val) +{ + regs->rdx = val >> 32; + regs->rax = (uint32_t)val; +} + +static inline uint64_t rdtsc(void) +{ + uint32_t low, high; + + __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)); + + return ((uint64_t)high << 32) | low; +} + +static inline uint64_t rdtsc_ordered(void) +{ + /* + * The RDTSC instruction is not ordered relative to memory access. + * The Intel SDM and the AMD APM are both vague on this point, but + * empirically an RDTSC instruction can be speculatively executed + * before prior loads. An RDTSC immediately after an appropriate + * barrier appears to be ordered as a normal load, that is, it + * provides the same ordering guarantees as reading from a global + * memory location that some other imaginary CPU is updating + * continuously with a time stamp. + */ + alternative("lfence", "mfence", X86_FEATURE_MFENCE_RDTSC); + return rdtsc(); +} + +#define __write_tsc(val) wrmsrl(MSR_IA32_TSC, val) +#define write_tsc(val) ({ \ + /* Reliable TSCs are in lockstep across all CPUs. We should \ + * never write to them. */ \ + ASSERT(!boot_cpu_has(X86_FEATURE_TSC_RELIABLE)); \ + __write_tsc(val); \ +}) + +#define rdpmc(counter,low,high) \ + __asm__ __volatile__("rdpmc" \ + : "=a" (low), "=d" (high) \ + : "c" (counter)) + +/* + * On hardware supporting FSGSBASE, the value loaded into hardware is the + * guest kernel's choice for 64bit PV guests (Xen's choice for Idle, HVM and + * 32bit PV). + * + * Therefore, the {RD,WR}{FS,GS}BASE instructions are only safe to use if + * %cr4.fsgsbase is set. 
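
As a worked illustration of how the bit definitions from msr-index.h combine with the accessors above, here is a minimal sketch. The helper name is hypothetical and is not part of this patch; real Xen code gates TSX handling on ARCH_CAPS_TSX_CTRL rather than probing blindly.

    /* Hypothetical sketch: probe an MSR with the fault-tolerant accessor
     * before writing it.  rdmsr_safe()/wrmsr_safe() return non-zero if the
     * access faulted (e.g. the MSR does not exist on this CPU). */
    static int disable_tsx_sketch(void)
    {
        uint64_t val;

        if ( rdmsr_safe(MSR_TSX_CTRL, val) )
            return -ENODEV;

        val |= TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;

        return wrmsr_safe(MSR_TSX_CTRL, val);
    }
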
+ */
+static inline unsigned long __rdfsbase(void)
+{
+    unsigned long base;
+
+#ifdef HAVE_AS_FSGSBASE
+    asm volatile ( "rdfsbase %0" : "=r" (base) );
+#else
+    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc0" : "=a" (base) );
+#endif
+
+    return base;
+}
+
+static inline unsigned long __rdgsbase(void)
+{
+    unsigned long base;
+
+#ifdef HAVE_AS_FSGSBASE
+    asm volatile ( "rdgsbase %0" : "=r" (base) );
+#else
+    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc8" : "=a" (base) );
+#endif
+
+    return base;
+}
+
+static inline void __wrfsbase(unsigned long base)
+{
+#ifdef HAVE_AS_FSGSBASE
+    asm volatile ( "wrfsbase %0" :: "r" (base) );
+#else
+    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd0" :: "a" (base) );
+#endif
+}
+
+static inline void __wrgsbase(unsigned long base)
+{
+#ifdef HAVE_AS_FSGSBASE
+    asm volatile ( "wrgsbase %0" :: "r" (base) );
+#else
+    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8" :: "a" (base) );
+#endif
+}
+
+static inline unsigned long read_fs_base(void)
+{
+    unsigned long base;
+
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+        return __rdfsbase();
+
+    rdmsrl(MSR_FS_BASE, base);
+
+    return base;
+}
+
+static inline unsigned long read_gs_base(void)
+{
+    unsigned long base;
+
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+        return __rdgsbase();
+
+    rdmsrl(MSR_GS_BASE, base);
+
+    return base;
+}
+
+static inline unsigned long read_gs_shadow(void)
+{
+    unsigned long base;
+
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+    {
+        asm volatile ( "swapgs" );
+        base = __rdgsbase();
+        asm volatile ( "swapgs" );
+    }
+    else
+        rdmsrl(MSR_SHADOW_GS_BASE, base);
+
+    return base;
+}
+
+static inline void write_fs_base(unsigned long base)
+{
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+        __wrfsbase(base);
+    else
+        wrmsrl(MSR_FS_BASE, base);
+}
+
+static inline void write_gs_base(unsigned long base)
+{
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+        __wrgsbase(base);
+    else
+        wrmsrl(MSR_GS_BASE, base);
+}
+
+static inline void write_gs_shadow(unsigned long base)
+{
+    if ( read_cr4() & X86_CR4_FSGSBASE )
+    {
+        asm volatile ( "swapgs\n\t"
+#ifdef HAVE_AS_FSGSBASE
+                       "wrgsbase %0\n\t"
+                       "swapgs"
+                       :: "r" (base) );
+#else
+                       ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8\n\t"
+                       "swapgs"
+                       :: "a" (base) );
+#endif
+    }
+    else
+        wrmsrl(MSR_SHADOW_GS_BASE, base);
+}
+
+DECLARE_PER_CPU(uint64_t, efer);
+static inline uint64_t read_efer(void)
+{
+    return this_cpu(efer);
+}
+
+static inline void write_efer(uint64_t val)
+{
+    this_cpu(efer) = val;
+    wrmsrl(MSR_EFER, val);
+}
+
+extern unsigned int ler_msr;
+
+DECLARE_PER_CPU(uint32_t, tsc_aux);
+
+/* Lazy update of MSR_TSC_AUX */
+static inline void wrmsr_tsc_aux(uint32_t val)
+{
+    uint32_t *this_tsc_aux = &this_cpu(tsc_aux);
+
+    if ( *this_tsc_aux != val )
+    {
+        wrmsr(MSR_TSC_AUX, val, 0);
+        *this_tsc_aux = val;
+    }
+}
+
+extern struct msr_policy raw_msr_policy,
+    host_msr_policy,
+    pv_max_msr_policy,
+    pv_def_msr_policy,
+    hvm_max_msr_policy,
+    hvm_def_msr_policy;
+
+/* Container object for per-vCPU MSRs */
+struct vcpu_msrs
+{
+    /* 0x00000048 - MSR_SPEC_CTRL */
+    struct {
+        uint32_t raw;
+    } spec_ctrl;
+
+    /*
+     * 0x00000140 - MSR_INTEL_MISC_FEATURES_ENABLES
+     *
+     * This MSR is non-architectural, but for simplicity we allow it to be
+     * read unconditionally.  The CPUID Faulting bit is the only writeable
+     * bit, and only if enumerated by MSR_PLATFORM_INFO.
+     */
+    union {
+        uint32_t raw;
+        struct {
+            bool cpuid_faulting:1;
+        };
+    } misc_features_enables;
+
+    /*
+     * 0x00000560 ... 57x - MSR_RTIT_*
+     *
+     * "Real Time Instruction Trace", now called Processor Trace.
+ * + * These MSRs are not exposed to guests. They are controlled by Xen + * behind the scenes, when vmtrace is enabled for the domain. + * + * MSR_RTIT_OUTPUT_BASE not stored here. It is fixed per vcpu, and + * derived from v->vmtrace.buf. + */ + struct { + /* + * Placed in the MSR load/save lists. Only modified by hypercall in + * the common case. + */ + uint64_t ctl; + + /* + * Updated by hardware in non-root mode. Synchronised here on vcpu + * context switch. + */ + uint64_t status; + union { + uint64_t output_mask; + struct { + uint32_t output_limit; + uint32_t output_offset; + }; + }; + } rtit; + + /* 0x00000da0 - MSR_IA32_XSS */ + struct { + uint64_t raw; + } xss; + + /* + * 0xc0000103 - MSR_TSC_AUX + * + * Value is guest chosen, and always loaded in vcpu context. Guests have + * no direct MSR access, and the value is accessible to userspace with the + * RDTSCP and RDPID instructions. + */ + uint32_t tsc_aux; + + /* + * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK + * + * Loaded into hardware for guests which have active %dr7 settings. + * Furthermore, HVM guests are offered direct access, meaning that the + * values here may be stale in current context. + */ + uint32_t dr_mask[4]; +}; + +void init_guest_msr_policy(void); +int init_domain_msr_policy(struct domain *d); +int init_vcpu_msr_policy(struct vcpu *v); + +/* + * Below functions can return X86EMUL_UNHANDLEABLE which means that MSR is + * not (yet) handled by it and must be processed by legacy handlers. Such + * behaviour is needed for transition period until all rd/wrmsr are handled + * by the new MSR infrastructure. + * + * These functions are also used by the migration logic, so need to cope with + * being used outside of v's context. + */ +int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val); +int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val); + +#endif /* __ASM_MSR_H */ diff --git a/xen/arch/x86/include/asm/mtrr.h b/xen/arch/x86/include/asm/mtrr.h new file mode 100644 index 0000000000..e0fd1005ce --- /dev/null +++ b/xen/arch/x86/include/asm/mtrr.h @@ -0,0 +1,103 @@ +#ifndef __ASM_X86_MTRR_H__ +#define __ASM_X86_MTRR_H__ + +#include + +/* These are the region types. They match the architectural specification. */ +#define MTRR_TYPE_UNCACHABLE 0 +#define MTRR_TYPE_WRCOMB 1 +#define MTRR_TYPE_WRTHROUGH 4 +#define MTRR_TYPE_WRPROT 5 +#define MTRR_TYPE_WRBACK 6 +#define MTRR_NUM_TYPES 7 +#define MEMORY_NUM_TYPES MTRR_NUM_TYPES +#define NO_HARDCODE_MEM_TYPE MTRR_NUM_TYPES + +#define NORMAL_CACHE_MODE 0 +#define NO_FILL_CACHE_MODE 2 + +enum { + PAT_TYPE_UNCACHABLE=0, + PAT_TYPE_WRCOMB=1, + PAT_TYPE_WRTHROUGH=4, + PAT_TYPE_WRPROT=5, + PAT_TYPE_WRBACK=6, + PAT_TYPE_UC_MINUS=7, + PAT_TYPE_NUMS +}; + +#define INVALID_MEM_TYPE PAT_TYPE_NUMS + +/* In the Intel processor's MTRR interface, the MTRR type is always held in + an 8 bit field: */ +typedef u8 mtrr_type; + +#define MTRR_PHYSMASK_VALID_BIT 11 +#define MTRR_PHYSMASK_VALID (1 << MTRR_PHYSMASK_VALID_BIT) +#define MTRR_PHYSMASK_SHIFT 12 +#define MTRR_PHYSBASE_TYPE_MASK 0xff +#define MTRR_PHYSBASE_SHIFT 12 +/* Number of variable range MSR pairs we emulate for HVM guests: */ +#define MTRR_VCNT 8 +/* Maximum number of variable range MSR pairs if FE is supported. 
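
To make the PHYSBASE/PHYSMASK layout above concrete, a small decoding sketch follows. The helper is hypothetical, and addr_bits stands for the physical address width that real code would obtain from CPUID leaf 0x80000008.

    /* Sketch: recover base, size and type from one variable-range MTRR pair
     * as read via MSR_IA32_MTRR_PHYSBASE(n)/MSR_IA32_MTRR_PHYSMASK(n). */
    static bool decode_var_mtrr(uint64_t base_msr, uint64_t mask_msr,
                                unsigned int addr_bits,
                                uint64_t *base, uint64_t *size, mtrr_type *type)
    {
        uint64_t addr_mask = ((1ULL << addr_bits) - 1) &
                             ~((1ULL << MTRR_PHYSMASK_SHIFT) - 1);

        if ( !(mask_msr & MTRR_PHYSMASK_VALID) )
            return false;                /* this base/mask pair is disabled */

        *type = base_msr & MTRR_PHYSBASE_TYPE_MASK;
        *base = base_msr & addr_mask;

        /* The mask selects the address bits which must match; the range size
         * is therefore the lowest set address bit of the mask. */
        mask_msr &= addr_mask;
        *size = mask_msr & -mask_msr;

        return true;
    }
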
*/ +#define MTRR_VCNT_MAX ((MSR_MTRRfix64K_00000 - \ + MSR_IA32_MTRR_PHYSBASE(0)) / 2) + +struct mtrr_var_range { + uint64_t base; + uint64_t mask; +}; + +#define NUM_FIXED_RANGES 88 +#define NUM_FIXED_MSR 11 +struct mtrr_state { + struct mtrr_var_range *var_ranges; + mtrr_type fixed_ranges[NUM_FIXED_RANGES]; + bool enabled; + bool fixed_enabled; + bool have_fixed; + mtrr_type def_type; + + u64 mtrr_cap; + /* ranges in var MSRs are overlapped or not:0(no overlapped) */ + bool_t overlapped; +}; +extern struct mtrr_state mtrr_state; + +extern void mtrr_save_fixed_ranges(void *); +extern void mtrr_save_state(void); +extern int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_add_page(unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_del(int reg, unsigned long base, unsigned long size); +extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); +extern int mtrr_get_type(const struct mtrr_state *m, paddr_t pa, + unsigned int order); +extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); +extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr, + paddr_t spaddr, uint8_t gmtrr_mtype); +extern unsigned char pat_type_2_pte_flags(unsigned char pat_type); +extern int hold_mtrr_updates_on_aps; +extern void mtrr_aps_sync_begin(void); +extern void mtrr_aps_sync_end(void); +extern void mtrr_bp_restore(void); + +extern bool_t mtrr_var_range_msr_set(struct domain *, struct mtrr_state *, + uint32_t msr, uint64_t msr_content); +extern bool_t mtrr_fix_range_msr_set(struct domain *, struct mtrr_state *, + uint32_t row, uint64_t msr_content); +extern bool_t mtrr_def_type_msr_set(struct domain *, struct mtrr_state *, + uint64_t msr_content); +#ifdef CONFIG_HVM +extern void memory_type_changed(struct domain *); +#else +static inline void memory_type_changed(struct domain *d) {} +#endif + +extern bool_t pat_msr_set(uint64_t *pat, uint64_t msr); + +bool is_var_mtrr_overlapped(const struct mtrr_state *m); +bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs); + +#endif /* __ASM_X86_MTRR_H__ */ diff --git a/xen/arch/x86/include/asm/multicall.h b/xen/arch/x86/include/asm/multicall.h new file mode 100644 index 0000000000..7e1d4c121a --- /dev/null +++ b/xen/arch/x86/include/asm/multicall.h @@ -0,0 +1,12 @@ +/****************************************************************************** + * asm-x86/multicall.h + */ + +#ifndef __ASM_X86_MULTICALL_H__ +#define __ASM_X86_MULTICALL_H__ + +#include + +typeof(arch_do_multicall_call) pv_do_multicall_call, hvm_do_multicall_call; + +#endif /* __ASM_X86_MULTICALL_H__ */ diff --git a/xen/arch/x86/include/asm/mwait.h b/xen/arch/x86/include/asm/mwait.h new file mode 100644 index 0000000000..f377d9fdca --- /dev/null +++ b/xen/arch/x86/include/asm/mwait.h @@ -0,0 +1,19 @@ +#ifndef __ASM_X86_MWAIT_H__ +#define __ASM_X86_MWAIT_H__ + +#include + +#define MWAIT_SUBSTATE_MASK 0xf +#define MWAIT_CSTATE_MASK 0xf +#define MWAIT_SUBSTATE_SIZE 4 + +#define CPUID_MWAIT_LEAF 5 +#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 +#define CPUID5_ECX_INTERRUPT_BREAK 0x2 + +#define MWAIT_ECX_INTERRUPT_BREAK 0x1 + +void mwait_idle_with_hints(unsigned int eax, unsigned int ecx); +bool mwait_pc10_supported(void); + +#endif /* __ASM_X86_MWAIT_H__ */ diff --git a/xen/arch/x86/include/asm/nmi.h b/xen/arch/x86/include/asm/nmi.h new file mode 100644 index 0000000000..9a5da14162 --- /dev/null +++ b/xen/arch/x86/include/asm/nmi.h @@ -0,0 +1,46 @@ + +#ifndef 
ASM_NMI_H +#define ASM_NMI_H + +#include + +struct cpu_user_regs; + +/* Watchdog boolean from the command line */ +extern bool opt_watchdog; + +/* Watchdog force parameter from the command line */ +extern bool watchdog_force; + +/* CPU to handle platform NMI */ +extern const unsigned int nmi_cpu; + +typedef int nmi_callback_t(const struct cpu_user_regs *regs, int cpu); + +/** + * set_nmi_callback + * + * Set a handler for an NMI. Only one handler may be + * set. Return the old nmi callback handler. + */ +nmi_callback_t *set_nmi_callback(nmi_callback_t *callback); + +/** + * unset_nmi_callback + * + * Remove the handler previously set. + */ +void unset_nmi_callback(void); + +DECLARE_PER_CPU(unsigned int, nmi_count); + +/** + * trigger_nmi_continuation + * + * Schedule continuation to be started in interrupt context after NMI handling. + */ +void trigger_nmi_continuation(void); + +/* Check for NMI continuation pending. */ +bool nmi_check_continuation(void); +#endif /* ASM_NMI_H */ diff --git a/xen/arch/x86/include/asm/nops.h b/xen/arch/x86/include/asm/nops.h new file mode 100644 index 0000000000..1a46b97aff --- /dev/null +++ b/xen/arch/x86/include/asm/nops.h @@ -0,0 +1,70 @@ +#ifndef __X86_ASM_NOPS_H__ +#define __X86_ASM_NOPS_H__ + +/* + * Define nops for use with alternative(). + */ + +#define NOP_DS_PREFIX 0x3e + +/* + * Opteron 64bit nops + * 1: nop + * 2: osp nop + * 3: osp osp nop + * 4: osp osp osp nop + */ +#define K8_NOP1 0x90 +#define K8_NOP2 0x66,K8_NOP1 +#define K8_NOP3 0x66,K8_NOP2 +#define K8_NOP4 0x66,K8_NOP3 +#define K8_NOP5 K8_NOP3,K8_NOP2 +#define K8_NOP6 K8_NOP3,K8_NOP3 +#define K8_NOP7 K8_NOP4,K8_NOP3 +#define K8_NOP8 K8_NOP4,K8_NOP4 +#define K8_NOP9 K8_NOP3,K8_NOP3,K8_NOP3 + +/* + * P6 nops + * uses eax dependencies (Intel-recommended choice) + * 1: nop + * 2: osp nop + * 3: nopl (%eax) + * 4: nopl 0x00(%eax) + * 5: nopl 0x00(%eax,%eax,1) + * 6: osp nopl 0x00(%eax,%eax,1) + * 7: nopl 0x00000000(%eax) + * 8: nopl 0x00000000(%eax,%eax,1) + * 9: nopw 0x00000000(%eax,%eax,1) + * Note: All the above are assumed to be a single instruction. + * There is kernel code that depends on this. + */ +#define P6_NOP1 0x90 +#define P6_NOP2 0x66,0x90 +#define P6_NOP3 0x0f,0x1f,0x00 +#define P6_NOP4 0x0f,0x1f,0x40,0 +#define P6_NOP5 0x0f,0x1f,0x44,0x00,0 +#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 +#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 +#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 +#define P6_NOP9 0x66,0x0f,0x1f,0x84,0x00,0,0,0,0 + +#ifdef __ASSEMBLY__ +#define _ASM_MK_NOP(x) .byte x +#else +#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" +#endif + +#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) +#define ASM_NOP9 _ASM_MK_NOP(P6_NOP9) + +#define ASM_NOP_MAX 9 + +#endif /* __X86_ASM_NOPS_H__ */ diff --git a/xen/arch/x86/include/asm/nospec.h b/xen/arch/x86/include/asm/nospec.h new file mode 100644 index 0000000000..5312ae4c6f --- /dev/null +++ b/xen/arch/x86/include/asm/nospec.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ */
+
+#ifndef _ASM_X86_NOSPEC_H
+#define _ASM_X86_NOSPEC_H
+
+#include <asm/alternative.h>
+
+/* Allow to insert a read memory barrier into conditionals */
+static always_inline bool barrier_nospec_true(void)
+{
+#ifdef CONFIG_SPECULATIVE_HARDEN_BRANCH
+    alternative("lfence", "", X86_FEATURE_SC_NO_BRANCH_HARDEN);
+#endif
+    return true;
+}
+
+/* Allow to protect evaluation of conditionals with respect to speculation */
+static always_inline bool evaluate_nospec(bool condition)
+{
+    return condition ? barrier_nospec_true() : !barrier_nospec_true();
+}
+
+/* Allow to block speculative execution in generic code */
+static always_inline void block_speculation(void)
+{
+    barrier_nospec_true();
+}
+
+#endif /* _ASM_X86_NOSPEC_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/include/asm/numa.h b/xen/arch/x86/include/asm/numa.h
new file mode 100644
index 0000000000..bada2c0bb9
--- /dev/null
+++ b/xen/arch/x86/include/asm/numa.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_X8664_NUMA_H
+#define _ASM_X8664_NUMA_H 1
+
+#include <xen/cpumask.h>
+
+#define NODES_SHIFT 6
+
+typedef u8 nodeid_t;
+
+extern int srat_rev;
+
+extern nodeid_t cpu_to_node[NR_CPUS];
+extern cpumask_t node_to_cpumask[];
+
+#define cpu_to_node(cpu) (cpu_to_node[cpu])
+#define parent_node(node) (node)
+#define node_to_first_cpu(node) (__ffs(node_to_cpumask[node]))
+#define node_to_cpumask(node) (node_to_cpumask[node])
+
+struct node {
+    u64 start,end;
+};
+
+extern int compute_hash_shift(struct node *nodes, int numnodes,
+                              nodeid_t *nodeids);
+extern nodeid_t pxm_to_node(unsigned int pxm);
+
+#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
+#define VIRTUAL_BUG_ON(x)
+
+extern void numa_add_cpu(int cpu);
+extern void numa_init_array(void);
+extern bool numa_off;
+
+
+extern int srat_disabled(void);
+extern void numa_set_node(int cpu, nodeid_t node);
+extern nodeid_t setup_node(unsigned int pxm);
+extern void srat_detect_node(int cpu);
+
+extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end);
+extern nodeid_t apicid_to_node[];
+extern void init_cpu_to_node(void);
+
+static inline void clear_node_cpumask(int cpu)
+{
+    cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
+}
+
+/* Simple perfect hash to map pdx to node numbers */
+extern int memnode_shift;
+extern unsigned long memnodemapsize;
+extern u8 *memnodemap;
+
+struct node_data {
+    unsigned long node_start_pfn;
+    unsigned long node_spanned_pages;
+};
+
+extern struct node_data node_data[];
+
+static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
+{
+    nodeid_t nid;
+    VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize);
+    nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift];
+    VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
+    return nid;
+}
+
+#define NODE_DATA(nid) (&(node_data[nid]))
+
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
+#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
+#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
+                           NODE_DATA(nid)->node_spanned_pages)
+
+extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
+
+void srat_parse_regions(u64 addr);
+extern u8 __node_distance(nodeid_t a, nodeid_t b);
+unsigned int arch_get_dma_bitsize(void);
+
+#endif
diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
new file mode 100644
index 0000000000..357a808748
--- /dev/null
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -0,0 +1,1022 @@
+/******************************************************************************
+ * include/asm-x86/p2m.h
+ *
+ * physical-to-machine mappings for automatically-translated domains.
+ *
+ * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _XEN_ASM_X86_P2M_H
+#define _XEN_ASM_X86_P2M_H
+
+#include <xen/paging.h>
+#include <xen/mem_access.h>
+#include <asm/mem_sharing.h>
+#include <asm/page.h>       /* for pagetable_t */
+
+/* Debugging and auditing of the P2M code? */
+#if !defined(NDEBUG) && defined(CONFIG_HVM)
+#define P2M_AUDIT 1
+#else
+#define P2M_AUDIT 0
+#endif
+#define P2M_DEBUGGING 0
+
+extern bool_t opt_hap_1gb, opt_hap_2mb;
+
+/*
+ * The upper levels of the p2m pagetable always contain full rights; all
+ * variation in the access control bits is made in the level-1 PTEs.
+ *
+ * In addition to the phys-to-machine translation, each p2m PTE contains
+ * *type* information about the gfn it translates, helping Xen to decide
+ * on the correct course of action when handling a page-fault to that
+ * guest frame.  We store the type in the "available" bits of the PTEs
+ * in the table, which gives us 8 possible types on 32-bit systems.
+ * Further expansions of the type system will only be supported on
+ * 64-bit Xen.
+ */
+
+/*
+ * AMD IOMMU: When we share the p2m table with the IOMMU, bits 52 - 58 in
+ * the PTE cannot be non-zero; otherwise the hardware generates I/O page
+ * faults when a device accesses those pages.  Therefore, p2m_ram_rw has
+ * to be defined as 0.
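
As an aside on this constraint, here is a sketch of the packing direction, assuming the same bit-12 placement that p2m_flags_to_type() uses near the end of this header; the in-tree encoder (in p2m-pt.c) is more involved, and this helper is hypothetical.

    /* Sketch: store a p2m type (the enum just below) in the "available"
     * PTE flag bits.  With p2m_ram_rw == 0, an ordinary RAM mapping
     * leaves the IOMMU-sensitive bits clear. */
    static inline unsigned int p2m_type_to_flags_sketch(p2m_type_t t)
    {
        return (unsigned int)t << 12;
    }
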
+ */
+typedef enum {
+    p2m_ram_rw = 0,             /* Normal read/write guest RAM */
+    p2m_invalid = 1,            /* Nothing mapped here */
+    p2m_ram_logdirty = 2,       /* Temporarily read-only for log-dirty */
+    p2m_ram_ro = 3,             /* Read-only; writes are silently dropped */
+    p2m_mmio_dm = 4,            /* Reads and writes go to the device model */
+    p2m_mmio_direct = 5,        /* Read/write mapping of genuine MMIO area */
+    p2m_populate_on_demand = 6, /* Place-holder for empty memory */
+
+    /* Although these are defined in all builds, they can only
+     * be used in 64-bit builds */
+    p2m_grant_map_rw = 7,       /* Read/write grant mapping */
+    p2m_grant_map_ro = 8,       /* Read-only grant mapping */
+    p2m_ram_paging_out = 9,     /* Memory that is being paged out */
+    p2m_ram_paged = 10,         /* Memory that has been paged out */
+    p2m_ram_paging_in = 11,     /* Memory that is being paged in */
+    p2m_ram_shared = 12,        /* Shared or sharable memory */
+    p2m_ram_broken = 13,        /* Broken page, accesses cause domain crash */
+    p2m_map_foreign = 14,       /* ram pages from foreign domain */
+    p2m_ioreq_server = 15,
+} p2m_type_t;
+
+/* Modifiers to the query */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */
+
+/* We use bitmaps and masks to handle groups of types */
+#define p2m_to_mask(_t) (1UL << (_t))
+
+/* RAM types, which map to real machine frames */
+#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) \
+                       | p2m_to_mask(p2m_ram_logdirty) \
+                       | p2m_to_mask(p2m_ram_ro) \
+                       | p2m_to_mask(p2m_ram_paging_out) \
+                       | p2m_to_mask(p2m_ram_paged) \
+                       | p2m_to_mask(p2m_ram_paging_in) \
+                       | p2m_to_mask(p2m_ram_shared) \
+                       | p2m_to_mask(p2m_ioreq_server))
+
+/* Types that represent a physmap hole that is ok to replace with a shared
+ * entry */
+#define P2M_HOLE_TYPES (p2m_to_mask(p2m_mmio_dm) \
+                        | p2m_to_mask(p2m_invalid) \
+                        | p2m_to_mask(p2m_ram_paging_in) \
+                        | p2m_to_mask(p2m_ram_paged))
+
+/* Grant mapping types, which map to a real machine frame in another
+ * VM */
+#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) \
+                         | p2m_to_mask(p2m_grant_map_ro) )
+
+/* MMIO types, which don't have to map to anything in the frametable */
+#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm) \
+                        | p2m_to_mask(p2m_mmio_direct))
+
+/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
+#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty) \
+                      | p2m_to_mask(p2m_ram_ro) \
+                      | p2m_to_mask(p2m_grant_map_ro) \
+                      | p2m_to_mask(p2m_ram_shared))
+
+/* Write-discard types, which should discard the write operations */
+#define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \
+                                 | p2m_to_mask(p2m_grant_map_ro))
+
+/* Types that can be subject to bulk transitions.
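
For context, a bulk transition of these "changeable" types looks like this from the caller's side; the helper is hypothetical, and the declaration of p2m_change_entry_type_global() appears further down this header.

    /* Sketch: global log-dirty enable retypes every writable RAM entry. */
    static void logdirty_enable_sketch(struct domain *d)
    {
        p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    }
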
*/ +#define P2M_CHANGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ + | p2m_to_mask(p2m_ram_logdirty) \ + | p2m_to_mask(p2m_ioreq_server) ) + +#define P2M_POD_TYPES (p2m_to_mask(p2m_populate_on_demand)) + +/* Pageable types */ +#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ + | p2m_to_mask(p2m_ram_logdirty) ) + +#ifdef CONFIG_MEM_PAGING +#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out) \ + | p2m_to_mask(p2m_ram_paged) \ + | p2m_to_mask(p2m_ram_paging_in)) + +#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged)) +#else +#define P2M_PAGING_TYPES 0 +#define P2M_PAGED_TYPES 0 +#endif + +/* Shared types */ +/* XXX: Sharable types could include p2m_ram_ro too, but we would need to + * reinit the type correctly after fault */ +#define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ + | p2m_to_mask(p2m_ram_logdirty) ) +#define P2M_SHARED_TYPES (p2m_to_mask(p2m_ram_shared)) + +/* Types established/cleaned up via special accessors. */ +#define P2M_SPECIAL_TYPES (P2M_GRANT_TYPES | \ + p2m_to_mask(p2m_map_foreign) | \ + p2m_to_mask(p2m_mmio_direct)) + +/* Valid types not necessarily associated with a (valid) MFN. */ +#define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES \ + | p2m_to_mask(p2m_mmio_direct) \ + | P2M_PAGING_TYPES) + +/* Broken type: the frame backing this pfn has failed in hardware + * and must not be touched. */ +#define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken)) + +/* Useful predicates */ +#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES) +#define p2m_is_hole(_t) (p2m_to_mask(_t) & P2M_HOLE_TYPES) +#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES) +#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES) +#define p2m_is_discard_write(_t) (p2m_to_mask(_t) & P2M_DISCARD_WRITE_TYPES) +#define p2m_is_changeable(_t) (p2m_to_mask(_t) & P2M_CHANGEABLE_TYPES) +#define p2m_is_pod(_t) (p2m_to_mask(_t) & P2M_POD_TYPES) +#define p2m_is_grant(_t) (p2m_to_mask(_t) & P2M_GRANT_TYPES) +/* Grant types are *not* considered valid, because they can be + unmapped at any time and, unless you happen to be the shadow or p2m + implementations, there's no way of synchronising against that. */ +#define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES)) +#define p2m_has_emt(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct))) +#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES) +#define p2m_is_paging(_t) (p2m_to_mask(_t) & P2M_PAGING_TYPES) +#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES) +#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES) +#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES) +#define p2m_is_special(_t) (p2m_to_mask(_t) & P2M_SPECIAL_TYPES) +#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES) +#define p2m_is_foreign(_t) (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign)) + +#define p2m_is_any_ram(_t) (p2m_to_mask(_t) & \ + (P2M_RAM_TYPES | P2M_GRANT_TYPES | \ + p2m_to_mask(p2m_map_foreign))) + +#define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES) + +typedef enum { + p2m_host, + p2m_nested, + p2m_alternate, +} p2m_class_t; + +/* Per-p2m-table state */ +struct p2m_domain { + /* Lock that protects updates to the p2m */ + mm_rwlock_t lock; + + /* Shadow translated domain: p2m mapping */ + pagetable_t phys_table; + + /* + * Same as a domain's dirty_cpumask but limited to + * this p2m and those physical cpus whose vcpu's are in + * guestmode. 
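
A brief sketch of how the mask predicates above compose in practice; the helper is hypothetical.

    /* Sketch: a type qualifies as a copy source if it is RAM-backed
     * (including grant and foreign mappings), not paged out, and not
     * hardware-broken. */
    static bool copy_source_type_ok(p2m_type_t t)
    {
        return p2m_is_any_ram(t) && !p2m_is_paging(t) && !p2m_is_broken(t);
    }
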
+ */
+    cpumask_var_t dirty_cpumask;
+
+    struct domain *domain;   /* back pointer to domain */
+
+    p2m_class_t p2m_class;   /* host/nested/alternate */
+
+    /*
+     * Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type.  See definition of p2m_access_t.
+     */
+    p2m_access_t default_access;
+
+    /* Pages used to construct the p2m */
+    struct page_list_head pages;
+
+    /* Host p2m: Log-dirty ranges registered for the domain. */
+    struct rangeset *logdirty_ranges;
+
+    /* Host p2m: Global log-dirty mode enabled for the domain. */
+    bool global_logdirty;
+
+#ifdef CONFIG_HVM
+    /* Alternate p2m: count of vcpu's currently using this p2m. */
+    atomic_t active_vcpus;
+
+    int (*set_entry)(struct p2m_domain *p2m,
+                     gfn_t gfn,
+                     mfn_t mfn, unsigned int page_order,
+                     p2m_type_t p2mt,
+                     p2m_access_t p2ma,
+                     int sve);
+    mfn_t (*get_entry)(struct p2m_domain *p2m,
+                       gfn_t gfn,
+                       p2m_type_t *p2mt,
+                       p2m_access_t *p2ma,
+                       p2m_query_t q,
+                       unsigned int *page_order,
+                       bool_t *sve);
+    int (*recalc)(struct p2m_domain *p2m,
+                  unsigned long gfn);
+    void (*enable_hardware_log_dirty)(struct p2m_domain *p2m);
+    void (*disable_hardware_log_dirty)(struct p2m_domain *p2m);
+    void (*flush_hardware_cached_dirty)(struct p2m_domain *p2m);
+    void (*change_entry_type_global)(struct p2m_domain *p2m,
+                                     p2m_type_t ot,
+                                     p2m_type_t nt);
+    int (*change_entry_type_range)(struct p2m_domain *p2m,
+                                   p2m_type_t ot, p2m_type_t nt,
+                                   unsigned long first_gfn,
+                                   unsigned long last_gfn);
+    void (*memory_type_changed)(struct p2m_domain *p2m);
+    void (*write_p2m_entry_pre)(struct domain *d,
+                                unsigned long gfn,
+                                l1_pgentry_t old,
+                                l1_pgentry_t new,
+                                unsigned int level);
+    void (*write_p2m_entry_post)(struct p2m_domain *p2m,
+                                 unsigned int oflags);
+#endif
+#if P2M_AUDIT
+    long (*audit_p2m)(struct p2m_domain *p2m);
+#endif
+
+    /*
+     * P2M updates may require TLBs to be flushed (invalidated).
+     *
+     * If 'defer_flush' is set, flushes may be deferred by setting
+     * 'need_flush' and then flushing in 'tlb_flush()'.
+     *
+     * 'tlb_flush()' is only called if 'need_flush' was set.
+     *
+     * If a flush may be being deferred but an immediate flush is
+     * required (e.g., if a page is being freed to a pool other than the
+     * domheap), call p2m_tlb_flush_sync().
+     */
+    void (*tlb_flush)(struct p2m_domain *p2m);
+    unsigned int defer_flush;
+    bool_t need_flush;
+
+    /* If true, and an access fault comes in and there is no vm_event
+     * listener, pause domain.  Otherwise, remove access restrictions. */
+    bool_t access_required;
+
+    /* Highest guest frame that's ever been mapped in the p2m */
+    unsigned long max_mapped_pfn;
+
+    /*
+     * Alternate p2m's only: range of gfn's for which underlying
+     * mfn may have duplicate mappings
+     */
+    unsigned long min_remapped_gfn;
+    unsigned long max_remapped_gfn;
+
+#ifdef CONFIG_HVM
+    /* Populate-on-demand variables
+     * All variables are protected with the pod lock.  We cannot rely on
+     * the p2m lock if it's turned into a fine-grained lock.
+     * We only use the domain page_alloc lock for additions and
+     * deletions to the domain's page list.  Because we use it nested
+     * within the PoD lock, we enforce its ordering (by remembering
+     * the unlock level in the arch_domain sub struct).
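
The defer_flush/need_flush protocol described above can be summarised with a sketch; the batching helper is hypothetical (the real users are the flush-sync paths in the p2m code).

    /* Sketch: batch several p2m updates and issue at most one TLB flush. */
    static void p2m_batched_update_sketch(struct p2m_domain *p2m)
    {
        p2m->defer_flush++;
        /* ... several set_entry() calls; while defer_flush is non-zero,
         * each sets p2m->need_flush instead of flushing immediately ... */
        if ( --p2m->defer_flush == 0 && p2m->need_flush )
        {
            p2m->need_flush = 0;
            p2m->tlb_flush(p2m);
        }
    }
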
+     */
+    struct {
+        struct page_list_head super,   /* List of superpages */
+                              single;  /* Non-super lists */
+        long count,                    /* # of pages in cache lists */
+             entry_count;              /* # of pages in p2m marked pod */
+        gfn_t reclaim_single;          /* Last gfn of a scan */
+        gfn_t max_guest;               /* gfn of max guest demand-populate */
+
+        /*
+         * Tracking of the most recently populated PoD pages, for eager
+         * reclamation.
+         */
+        struct pod_mrp_list {
+#define NR_POD_MRP_ENTRIES 32
+
+/* Encode ORDER_2M superpage in top bit of GFN */
+#define POD_LAST_SUPERPAGE (gfn_x(INVALID_GFN) & ~(gfn_x(INVALID_GFN) >> 1))
+
+            unsigned long list[NR_POD_MRP_ENTRIES];
+            unsigned int idx;
+        } mrp;
+        mm_lock_t lock;                /* Locking of private pod structs, *
+                                        * not relying on the p2m lock.    */
+    } pod;
+
+    /*
+     * Host p2m: when this flag is set, don't flush all the nested-p2m
+     * tables on every host-p2m change.  The setter of this flag
+     * is responsible for performing the full flush before releasing the
+     * host p2m's lock.
+     */
+    bool defer_nested_flush;
+
+    /*
+     * Nested p2ms only: nested p2m base value that this p2m shadows.
+     * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
+     * needs both the per-p2m lock and the per-domain nestedp2m lock
+     * to set it to any other value.
+     */
+#define P2M_BASE_EADDR (~0ULL)
+    uint64_t np2m_base;
+    uint64_t np2m_generation;
+
+    /*
+     * Nested p2ms: linked list of np2ms allocated to this domain.
+     * The host p2m holds the head of the list and the np2ms are
+     * threaded on in LRU order.
+     */
+    struct list_head np2m_list;
+#endif
+
+    union {
+        struct ept_data ept;
+        /* NPT-equivalent structure could be added here. */
+    };
+
+    struct {
+        spinlock_t lock;
+        /*
+         * The ioreq server that is responsible for the emulation of
+         * gfns with a specific p2m type (for now, p2m_ioreq_server).
+         */
+        struct ioreq_server *server;
+        /*
+         * flags specifies whether read, write or both operations
+         * are to be emulated by an ioreq server.
+         */
+        unsigned int flags;
+        unsigned long entry_count;
+    } ioreq;
+};
+
+/* get host p2m table */
+#define p2m_get_hostp2m(d) ((d)->arch.p2m)
+
+/* All common type definitions should live ahead of this inclusion. */
+#ifdef _XEN_P2M_COMMON_H
+# error "xen/p2m-common.h should not be included directly"
+#endif
+#include <xen/p2m-common.h>
+
+static inline bool arch_acquire_resource_check(struct domain *d)
+{
+    /*
+     * FIXME: Until foreign pages inserted into the P2M are properly
+     * reference counted, it is unsafe to allow mapping of
+     * resource pages unless the caller is the hardware domain
+     * (see set_foreign_p2m_entry()).
+     */
+    return !paging_mode_translate(d) || is_hardware_domain(d);
+}
+
+/*
+ * Updates the vCPU's np2m to match its np2m_base in VMCx12 and returns
+ * that np2m.
+ */
+struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v);
+/* Similar to the above except that the returned p2m is still write-locked */
+struct p2m_domain *p2m_get_nestedp2m_locked(struct vcpu *v);
+
+/* If vcpu is in host mode then behaviour matches p2m_get_hostp2m().
+ * If vcpu is in guest mode then behaviour matches p2m_get_nestedp2m().
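
To illustrate the np2m list just described, a lookup sketch (not the in-tree implementation) that assumes the per-domain nestedp2m lock is already held:

    /* Sketch: find the np2m shadowing a given guest np2m_base; the host
     * p2m holds the list head and entries are kept in LRU order. */
    static struct p2m_domain *find_np2m_sketch(struct domain *d, uint64_t base)
    {
        struct p2m_domain *p2m;

        list_for_each_entry ( p2m, &p2m_get_hostp2m(d)->np2m_list, np2m_list )
            if ( p2m->np2m_base == base )
                return p2m;

        return NULL;
    }
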
+ */ +struct p2m_domain *p2m_get_p2m(struct vcpu *v); + +#define NP2M_SCHEDLE_IN 0 +#define NP2M_SCHEDLE_OUT 1 + +#ifdef CONFIG_HVM +void np2m_schedule(int dir); +#else +static inline void np2m_schedule(int dir) {} +#endif + +static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m) +{ + return p2m->p2m_class == p2m_host; +} + +static inline bool_t p2m_is_nestedp2m(const struct p2m_domain *p2m) +{ + return p2m->p2m_class == p2m_nested; +} + +static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m) +{ + return p2m->p2m_class == p2m_alternate; +} + +#define p2m_get_pagetable(p2m) ((p2m)->phys_table) + +/* + * Ensure any deferred p2m TLB flush has been completed on all VCPUs. + */ +void p2m_tlb_flush_sync(struct p2m_domain *p2m); +void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m); + +/**** p2m query accessors. They lock p2m_lock, and thus serialize + * lookups wrt modifications. They _do not_ release the lock on exit. + * After calling any of the variants below, caller needs to use + * put_gfn. ****/ + +mfn_t __nonnull(3, 4) __get_gfn_type_access( + struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, + p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked); + +/* Read a particular P2M table, mapping pages as we go. Most callers + * should _not_ call this directly; use the other get_gfn* functions + * below unless you know you want to walk a p2m that isn't a domain's + * main one. + * If the lookup succeeds, the return value is != INVALID_MFN and + * *page_order is filled in with the order of the superpage (if any) that + * the entry was found in. */ +static inline mfn_t __nonnull(3, 4) get_gfn_type_access( + struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, + p2m_access_t *a, p2m_query_t q, unsigned int *page_order) +{ + return __get_gfn_type_access(p2m, gfn, t, a, q, page_order, true); +} + +/* General conversion function from gfn to mfn */ +static inline mfn_t __nonnull(3) get_gfn_type( + struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) +{ + p2m_access_t a; + return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL); +} + +/* Syntactic sugar: most callers will use one of these. */ +#define get_gfn(d, g, t) get_gfn_type((d), (g), (t), P2M_ALLOC) +#define get_gfn_query(d, g, t) get_gfn_type((d), (g), (t), 0) +#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \ + P2M_ALLOC | P2M_UNSHARE) + +/* Will release the p2m_lock for this gfn entry. */ +void __put_gfn(struct p2m_domain *p2m, unsigned long gfn); + +#define put_gfn(d, gfn) __put_gfn(p2m_get_hostp2m((d)), (gfn)) + +/* The intent of the "unlocked" accessor is to have the caller not worry about + * put_gfn. They apply to very specific situations: debug printk's, dumps + * during a domain crash, or to peek at a p2m entry/type. Caller is not + * holding the p2m entry exclusively during or after calling this. + * + * This is also used in the shadow code whenever the paging lock is + * held -- in those cases, the caller is protected against concurrent + * p2m updates by the fact that write_p2m_entry() also takes + * the paging lock. + * + * Note that an unlocked accessor only makes sense for a "query" lookup. + * Any other type of query can cause a change in the p2m and may need to + * perform locking. 
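
The locked-lookup discipline above, shown as a sketch; the caller is hypothetical, and the point is that every locked get_gfn* variant must be paired with put_gfn, even on failure paths.

    /* Sketch: query a gfn, test it with the type predicates, and release
     * the per-gfn p2m lock taken by get_gfn(). */
    static bool gfn_is_writable_ram_sketch(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        mfn_t mfn = get_gfn(d, gfn, &t);
        bool ok = !mfn_eq(mfn, INVALID_MFN) && p2m_is_ram(t) &&
                  !p2m_is_readonly(t);

        put_gfn(d, gfn);
        return ok;
    }
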
+ */ +static inline mfn_t get_gfn_query_unlocked(struct domain *d, + unsigned long gfn, + p2m_type_t *t) +{ + p2m_access_t a; + return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0); +} + +/* Atomically look up a GFN and take a reference count on the backing page. + * This makes sure the page doesn't get freed (or shared) underfoot, + * and should be used by any path that intends to write to the backing page. + * Returns NULL if the page is not backed by RAM. + * The caller is responsible for calling put_page() afterwards. */ +struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn, + p2m_type_t *t, p2m_access_t *a, + p2m_query_t q); + +static inline struct page_info *get_page_from_gfn( + struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) +{ + struct page_info *page; + + if ( paging_mode_translate(d) ) + return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q); + + /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */ + if ( t ) + *t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct; + page = mfn_to_page(_mfn(gfn)); + return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL; +} + +/* General conversion function from mfn to gfn */ +static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn) +{ + if ( paging_mode_translate(d) ) + return _gfn(get_gpfn_from_mfn(mfn_x(mfn))); + else + return _gfn(mfn_x(mfn)); +} + +#ifdef CONFIG_HVM +#define AP2MGET_prepopulate true +#define AP2MGET_query false + +/* + * Looks up altp2m entry. If the entry is not found it looks up the entry in + * hostp2m. + * The prepopulate param is used to set the found entry in altp2m. + */ +int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn, + p2m_type_t *t, p2m_access_t *a, + bool prepopulate); +#endif + +/* Init the datastructures for later use by the p2m code */ +int p2m_init(struct domain *d); + +/* Allocate a new p2m table for a domain. + * + * Returns 0 for success or -errno. */ +int p2m_alloc_table(struct p2m_domain *p2m); + +/* Return all the p2m resources to Xen. */ +void p2m_teardown(struct p2m_domain *p2m); +void p2m_final_teardown(struct domain *d); + +/* Add a page to a domain's p2m table */ +int guest_physmap_add_entry(struct domain *d, gfn_t gfn, + mfn_t mfn, unsigned int page_order, + p2m_type_t t); + +/* Untyped version for RAM only, for compatibility and PV. */ +int __must_check guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int page_order); + +/* Set a p2m range as populate-on-demand */ +int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, + unsigned int order); + +#ifdef CONFIG_HVM + +/* Enable hardware-assisted log-dirty. */ +void p2m_enable_hardware_log_dirty(struct domain *d); + +/* Disable hardware-assisted log-dirty */ +void p2m_disable_hardware_log_dirty(struct domain *d); + +/* Flush hardware cached dirty GFNs */ +void p2m_flush_hardware_cached_dirty(struct domain *d); + +#else + +static inline void p2m_flush_hardware_cached_dirty(struct domain *d) {} + +#endif + +/* Change types across all p2m entries in a domain */ +void p2m_change_entry_type_global(struct domain *d, + p2m_type_t ot, p2m_type_t nt); + +/* Change types across a range of p2m entries (start ... 
end-1) */ +void p2m_change_type_range(struct domain *d, + unsigned long start, unsigned long end, + p2m_type_t ot, p2m_type_t nt); + +/* Compare-exchange the type of a single p2m entry */ +int p2m_change_type_one(struct domain *d, unsigned long gfn, + p2m_type_t ot, p2m_type_t nt); + +/* Synchronously change the p2m type for a range of gfns */ +int p2m_finish_type_change(struct domain *d, + gfn_t first_gfn, + unsigned long max_nr); + +int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start, + unsigned long end); + +/* Set mmio addresses in the p2m table (for pass-through) */ +int set_mmio_p2m_entry(struct domain *d, gfn_t gfn, mfn_t mfn, + unsigned int order); + +/* Set identity addresses in the p2m table (for pass-through) */ +int set_identity_p2m_entry(struct domain *d, unsigned long gfn, + p2m_access_t p2ma, unsigned int flag); +int clear_identity_p2m_entry(struct domain *d, unsigned long gfn); + +/* + * Populate-on-demand + */ + +/* Dump PoD information about the domain */ +void p2m_pod_dump_data(struct domain *d); + +#ifdef CONFIG_HVM + +/* Report a change affecting memory types. */ +void p2m_memory_type_changed(struct domain *d); + +/* Called by p2m code when demand-populating a PoD page */ +bool +p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order); + +/* Move all pages from the populate-on-demand cache to the domain page_list + * (usually in preparation for domain destruction) */ +int p2m_pod_empty_cache(struct domain *d); + +/* Set populate-on-demand cache size so that the total memory allocated to a + * domain matches target */ +int p2m_pod_set_mem_target(struct domain *d, unsigned long target); + +/* Scan pod cache when offline/broken page triggered */ +int +p2m_pod_offline_or_broken_hit(struct page_info *p); + +/* Replace pod cache when offline/broken page triggered */ +void +p2m_pod_offline_or_broken_replace(struct page_info *p); + +static inline long p2m_pod_entry_count(const struct p2m_domain *p2m) +{ + return p2m->pod.entry_count; +} + +void p2m_pod_init(struct p2m_domain *p2m); + +#else + +static inline bool +p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order) +{ + return false; +} + +static inline int p2m_pod_empty_cache(struct domain *d) +{ + return 0; +} + +static inline int p2m_pod_offline_or_broken_hit(struct page_info *p) +{ + return 0; +} + +static inline void p2m_pod_offline_or_broken_replace(struct page_info *p) +{ + ASSERT_UNREACHABLE(); +} + +static inline long p2m_pod_entry_count(const struct p2m_domain *p2m) +{ + return 0; +} + +static inline void p2m_pod_init(struct p2m_domain *p2m) {} + +#endif + + +/* + * Paging to disk and page-sharing + */ + +/* Modify p2m table for shared gfn */ +int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); + +/* Tell xenpaging to drop a paged out frame */ +void p2m_mem_paging_drop_page(struct domain *d, gfn_t gfn, p2m_type_t p2mt); +/* Start populating a paged out frame */ +void p2m_mem_paging_populate(struct domain *d, gfn_t gfn); +/* Resume normal operation (in case a domain was paused) */ +struct vm_event_st; +void p2m_mem_paging_resume(struct domain *d, struct vm_event_st *rsp); + +/* + * Internal functions, only called by other p2m code + */ + +mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level); +void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg); + +/* Directly set a p2m entry: only for use by p2m code. 
Does not need + * a call to put_gfn afterwards. */ +int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn, + unsigned int page_order, p2m_type_t p2mt, + p2m_access_t p2ma); + +#if defined(CONFIG_HVM) +/* Set up function pointers for PT implementation: only for use by p2m code */ +extern void p2m_pt_init(struct p2m_domain *p2m); +#else +static inline void p2m_pt_init(struct p2m_domain *p2m) {} +#endif + +void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn, + p2m_query_t q, uint32_t *pfec); + +#if P2M_AUDIT +extern void audit_p2m(struct domain *d, + uint64_t *orphans, + uint64_t *m2p_bad, + uint64_t *p2m_bad); +#endif /* P2M_AUDIT */ + +/* Printouts */ +#define P2M_PRINTK(f, a...) \ + debugtrace_printk("p2m: %s(): " f, __func__, ##a) +#define P2M_ERROR(f, a...) \ + printk(XENLOG_G_ERR "pg error: %s(): " f, __func__, ##a) +#if P2M_DEBUGGING +#define P2M_DEBUG(f, a...) \ + debugtrace_printk("p2mdebug: %s(): " f, __func__, ##a) +#else +#define P2M_DEBUG(f, a...) do { (void)(f); } while(0) +#endif + +/* + * Functions specific to the p2m-pt implementation + */ + +/* Extract the type from the PTE flags that store it */ +static inline p2m_type_t p2m_flags_to_type(unsigned int flags) +{ + /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need + * to make sure that an entirely empty PTE doesn't have RAM type */ + if ( flags == 0 ) + return p2m_invalid; + /* AMD IOMMUs use bits 9-11 to encode next io page level and bits + * 59-62 for iommu flags so we can't use them to store p2m type info. */ + return (flags >> 12) & 0x7f; +} + +static inline p2m_type_t p2m_recalc_type_range(bool recalc, p2m_type_t t, + struct p2m_domain *p2m, + unsigned long gfn_start, + unsigned long gfn_end) +{ + if ( !recalc || !p2m_is_changeable(t) ) + return t; + + if ( t == p2m_ioreq_server && p2m->ioreq.server != NULL ) + return t; + + return p2m_is_logdirty_range(p2m, gfn_start, gfn_end) ? p2m_ram_logdirty + : p2m_ram_rw; +} + +static inline p2m_type_t p2m_recalc_type(bool recalc, p2m_type_t t, + struct p2m_domain *p2m, + unsigned long gfn) +{ + return p2m_recalc_type_range(recalc, t, p2m, gfn, gfn); +} + +int p2m_pt_handle_deferred_changes(uint64_t gpa); + +/* + * Nested p2m: shadow p2m tables used for nested HVM virtualization + */ + +/* Flushes specified p2m table */ +void p2m_flush(struct vcpu *v, struct p2m_domain *p2m); +/* Flushes all nested p2m tables */ +void p2m_flush_nestedp2m(struct domain *d); +/* Flushes the np2m specified by np2m_base (if it exists) */ +void np2m_flush_base(struct vcpu *v, unsigned long np2m_base); + +void hap_p2m_init(struct p2m_domain *p2m); +void shadow_p2m_init(struct p2m_domain *p2m); + +void nestedp2m_write_p2m_entry_post(struct p2m_domain *p2m, + unsigned int oflags); + +/* + * Alternate p2m: shadow p2m tables used for alternate memory views + */ +#ifdef CONFIG_HVM +/* get current alternate p2m table */ +static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v) +{ + unsigned int index = vcpu_altp2m(v).p2midx; + + if ( index == INVALID_ALTP2M ) + return NULL; + + BUG_ON(index >= MAX_ALTP2M); + + return v->domain->arch.altp2m_p2m[index]; +} + +/* Switch alternate p2m for a single vcpu */ +bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx); + +/* Check to see if vcpu should be switched to a different p2m. 
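+ * + * Callers of the altp2m accessors above should expect the no-view case, + * e.g. (illustrative, v assumed in scope): + * + *     struct p2m_domain *ap2m = p2m_get_altp2m(v); + * + *     if ( ap2m ) + *         ... operate on the vcpu's currently active view ...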
*/ +void p2m_altp2m_check(struct vcpu *v, uint16_t idx); + +/* Flush all the alternate p2m's for a domain */ +void p2m_flush_altp2m(struct domain *d); + +/* Alternate p2m paging */ +bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l, + mfn_t *mfn, p2m_type_t *p2mt, + p2m_access_t *p2ma, unsigned int page_order); + +/* Make a specific alternate p2m valid */ +int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx); + +/* Find an available alternate p2m and make it valid */ +int p2m_init_next_altp2m(struct domain *d, uint16_t *idx, + xenmem_access_t hvmmem_default_access); + +/* Make a specific alternate p2m invalid */ +int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx); + +/* Switch alternate p2m for entire domain */ +int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx); + +/* Change a gfn->mfn mapping */ +int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, + gfn_t old_gfn, gfn_t new_gfn); + +/* Propagate a host p2m change to all alternate p2m's */ +int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, + mfn_t mfn, unsigned int page_order, + p2m_type_t p2mt, p2m_access_t p2ma); + +/* Set a specific p2m view visibility */ +int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx, + uint8_t visible); +#else +struct p2m_domain *p2m_get_altp2m(struct vcpu *v); +static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {} +#endif + +/* p2m access to IOMMU flags */ +static inline unsigned int p2m_access_to_iommu_flags(p2m_access_t p2ma) +{ + switch ( p2ma ) + { + case p2m_access_rw: + case p2m_access_rwx: + return IOMMUF_readable | IOMMUF_writable; + + case p2m_access_r: + case p2m_access_rx: + case p2m_access_rx2rw: + return IOMMUF_readable; + + case p2m_access_w: + case p2m_access_wx: + return IOMMUF_writable; + + case p2m_access_n: + case p2m_access_x: + case p2m_access_n2rwx: + return 0; + } + + ASSERT_UNREACHABLE(); + return 0; +} + +/* + * p2m type to IOMMU flags + */ +static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, + p2m_access_t p2ma, mfn_t mfn) +{ + unsigned int flags; + + switch( p2mt ) + { + case p2m_ram_rw: + case p2m_grant_map_rw: + case p2m_ram_logdirty: + case p2m_map_foreign: + flags = IOMMUF_readable | IOMMUF_writable; + break; + case p2m_ram_ro: + case p2m_grant_map_ro: + flags = IOMMUF_readable; + break; + case p2m_mmio_direct: + flags = p2m_access_to_iommu_flags(p2ma); + if ( (flags & IOMMUF_writable) && + rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) ) + flags &= ~IOMMUF_writable; + break; + default: + flags = 0; + break; + } + + return flags; +} + +int p2m_set_ioreq_server(struct domain *d, unsigned int flags, + struct ioreq_server *s); +struct ioreq_server *p2m_get_ioreq_server(struct domain *d, + unsigned int *flags); + +static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt, + p2m_type_t ot, mfn_t nfn, mfn_t ofn, + unsigned int level) +{ + BUG_ON(!level); + BUG_ON(level > 1 && (nt == p2m_ioreq_server || nt == p2m_map_foreign)); + + if ( level != 1 || (nt == ot && mfn_eq(nfn, ofn)) ) + return 0; + + switch ( nt ) + { + case p2m_ioreq_server: + /* + * p2m_ioreq_server is only used for 4K pages, so + * the count is only done for level 1 entries. 
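+ * The BUG_ON() at the top of this function already rejects superpage + * (level > 1) ioreq_server entries, so this counter tracks exactly the + * number of live 4K p2m_ioreq_server entries.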
+ */ + p2m->ioreq.entry_count++; + break; + + case p2m_map_foreign: + if ( !mfn_valid(nfn) ) + { + ASSERT_UNREACHABLE(); + return -EINVAL; + } + + if ( !page_get_owner_and_reference(mfn_to_page(nfn)) ) + return -EBUSY; + + break; + + default: + break; + } + + switch ( ot ) + { + case p2m_ioreq_server: + ASSERT(p2m->ioreq.entry_count > 0); + p2m->ioreq.entry_count--; + break; + + case p2m_map_foreign: + if ( !mfn_valid(ofn) ) + { + ASSERT_UNREACHABLE(); + return -EINVAL; + } + put_page(mfn_to_page(ofn)); + break; + + default: + break; + } + + return 0; +} + +#endif /* _XEN_ASM_X86_P2M_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/page-bits.h b/xen/arch/x86/include/asm/page-bits.h new file mode 100644 index 0000000000..6f7fc7d035 --- /dev/null +++ b/xen/arch/x86/include/asm/page-bits.h @@ -0,0 +1,26 @@ +#ifndef __X86_PAGE_SHIFT_H__ +#define __X86_PAGE_SHIFT_H__ + +#define L1_PAGETABLE_SHIFT 12 +#define L2_PAGETABLE_SHIFT 21 +#define L3_PAGETABLE_SHIFT 30 +#define L4_PAGETABLE_SHIFT 39 +#define PAGE_SHIFT L1_PAGETABLE_SHIFT +#define SUPERPAGE_SHIFT L2_PAGETABLE_SHIFT +#define ROOT_PAGETABLE_SHIFT L4_PAGETABLE_SHIFT + +#define PAGETABLE_ORDER 9 +#define L1_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) +#define L2_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) +#define L3_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) +#define L4_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) +#define ROOT_PAGETABLE_ENTRIES L4_PAGETABLE_ENTRIES + +#define SUPERPAGE_ORDER PAGETABLE_ORDER +#define SUPERPAGE_PAGES (1 << SUPERPAGE_ORDER) + +/* These are architectural limits. */ +#define PADDR_BITS 52 +#define VADDR_BITS 48 + +#endif /* __X86_PAGE_SHIFT_H__ */ diff --git a/xen/arch/x86/include/asm/page.h b/xen/arch/x86/include/asm/page.h new file mode 100644 index 0000000000..1d080cffbe --- /dev/null +++ b/xen/arch/x86/include/asm/page.h @@ -0,0 +1,409 @@ +#ifndef __X86_PAGE_H__ +#define __X86_PAGE_H__ + +#include +#include + +#define PAGE_ORDER_4K 0 +#define PAGE_ORDER_2M 9 +#define PAGE_ORDER_1G 18 + +#ifndef __ASSEMBLY__ +# include +# include +#endif + +#include + +/* Read a pte atomically from memory. */ +#define l1e_read_atomic(l1ep) \ + l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep)))) +#define l2e_read_atomic(l2ep) \ + l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep)))) +#define l3e_read_atomic(l3ep) \ + l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep)))) +#define l4e_read_atomic(l4ep) \ + l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep)))) + +/* Write a pte atomically to memory. */ +#define l1e_write_atomic(l1ep, l1e) \ + pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e)) +#define l2e_write_atomic(l2ep, l2e) \ + pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e)) +#define l3e_write_atomic(l3ep, l3e) \ + pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e)) +#define l4e_write_atomic(l4ep, l4e) \ + pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e)) + +/* + * Write a pte safely but non-atomically to memory. + * The PTE may become temporarily not-present during the update. 
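+ * + * As a rule of thumb (not enforced by the interface): use the + * lNe_write_atomic() variants above on live, reachable pagetables where a + * concurrent walker must never observe a torn entry; the plain lNe_write() + * variants below suffice for tables which are not (or are no longer) + * hooked up.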
+ */ +#define l1e_write(l1ep, l1e) \ + pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e)) +#define l2e_write(l2ep, l2e) \ + pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e)) +#define l3e_write(l3ep, l3e) \ + pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e)) +#define l4e_write(l4ep, l4e) \ + pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e)) + +/* Get direct integer representation of a pte's contents (intpte_t). */ +#define l1e_get_intpte(x) ((x).l1) +#define l2e_get_intpte(x) ((x).l2) +#define l3e_get_intpte(x) ((x).l3) +#define l4e_get_intpte(x) ((x).l4) + +/* Get pfn mapped by pte (unsigned long). */ +#define l1e_get_pfn(x) \ + ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) +#define l2e_get_pfn(x) \ + ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) +#define l3e_get_pfn(x) \ + ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) +#define l4e_get_pfn(x) \ + ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) + +/* Get mfn mapped by pte (mfn_t). */ +#define l1e_get_mfn(x) _mfn(l1e_get_pfn(x)) +#define l2e_get_mfn(x) _mfn(l2e_get_pfn(x)) +#define l3e_get_mfn(x) _mfn(l3e_get_pfn(x)) +#define l4e_get_mfn(x) _mfn(l4e_get_pfn(x)) + +/* Get physical address of page mapped by pte (paddr_t). */ +#define l1e_get_paddr(x) \ + ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK)))) +#define l2e_get_paddr(x) \ + ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK)))) +#define l3e_get_paddr(x) \ + ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK)))) +#define l4e_get_paddr(x) \ + ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK)))) + +/* Get pointer to info structure of page mapped by pte (struct page_info *). */ +#define l1e_get_page(x) mfn_to_page(l1e_get_mfn(x)) +#define l2e_get_page(x) mfn_to_page(l2e_get_mfn(x)) +#define l3e_get_page(x) mfn_to_page(l3e_get_mfn(x)) +#define l4e_get_page(x) mfn_to_page(l4e_get_mfn(x)) + +/* Get pte access flags (unsigned int). */ +#define l1e_get_flags(x) (get_pte_flags((x).l1)) +#define l2e_get_flags(x) (get_pte_flags((x).l2)) +#define l3e_get_flags(x) (get_pte_flags((x).l3)) +#define l4e_get_flags(x) (get_pte_flags((x).l4)) + +/* Get pte pkeys (unsigned int). */ +#define l1e_get_pkey(x) get_pte_pkey((x).l1) +#define l2e_get_pkey(x) get_pte_pkey((x).l2) +#define l3e_get_pkey(x) get_pte_pkey((x).l3) + +/* Construct an empty pte. */ +#define l1e_empty() ((l1_pgentry_t) { 0 }) +#define l2e_empty() ((l2_pgentry_t) { 0 }) +#define l3e_empty() ((l3_pgentry_t) { 0 }) +#define l4e_empty() ((l4_pgentry_t) { 0 }) + +/* Construct a pte from a pfn and access flags. */ +#define l1e_from_pfn(pfn, flags) \ + ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) +#define l2e_from_pfn(pfn, flags) \ + ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) +#define l3e_from_pfn(pfn, flags) \ + ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) +#define l4e_from_pfn(pfn, flags) \ + ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) + +/* Construct a pte from an mfn and access flags. */ +#define l1e_from_mfn(m, f) l1e_from_pfn(mfn_x(m), f) +#define l2e_from_mfn(m, f) l2e_from_pfn(mfn_x(m), f) +#define l3e_from_mfn(m, f) l3e_from_pfn(mfn_x(m), f) +#define l4e_from_mfn(m, f) l4e_from_pfn(mfn_x(m), f) + +/* Construct a pte from a physical address and access flags. 
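+ * E.g. (illustrative) l1e_from_paddr(page_to_maddr(pg), __PAGE_HYPERVISOR_RW) + * builds a writable hypervisor mapping of page pg; the address must be + * page-aligned, as the ASSERT()s below enforce.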
*/ +#ifndef __ASSEMBLY__ +static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags) +{ + ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); + return (l1_pgentry_t) { pa | put_pte_flags(flags) }; +} +static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags) +{ + ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); + return (l2_pgentry_t) { pa | put_pte_flags(flags) }; +} +static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags) +{ + ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); + return (l3_pgentry_t) { pa | put_pte_flags(flags) }; +} +static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags) +{ + ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); + return (l4_pgentry_t) { pa | put_pte_flags(flags) }; +} +#endif /* !__ASSEMBLY__ */ + +/* Construct a pte from its direct integer representation. */ +#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) }) +#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) }) +#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) }) +#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) }) + +/* Construct a pte from a page pointer and access flags. */ +#define l1e_from_page(page, flags) l1e_from_mfn(page_to_mfn(page), flags) +#define l2e_from_page(page, flags) l2e_from_mfn(page_to_mfn(page), flags) +#define l3e_from_page(page, flags) l3e_from_mfn(page_to_mfn(page), flags) +#define l4e_from_page(page, flags) l4e_from_mfn(page_to_mfn(page), flags) + +/* Add extra flags to an existing pte. */ +#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags)) +#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags)) +#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags)) +#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags)) + +/* Remove flags from an existing pte. */ +#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags)) +#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags)) +#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags)) +#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags)) + +/* Flip flags in an existing L1 PTE. */ +#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags)) + +/* Check if a pte's page mapping or significant access flags have changed. */ +#define l1e_has_changed(x,y,flags) \ + ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) +#define l2e_has_changed(x,y,flags) \ + ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) +#define l3e_has_changed(x,y,flags) \ + ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) +#define l4e_has_changed(x,y,flags) \ + ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) + +#define map_l1t_from_l2e(x) (l1_pgentry_t *)map_domain_page(l2e_get_mfn(x)) +#define map_l2t_from_l3e(x) (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x)) +#define map_l3t_from_l4e(x) (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x)) + +/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. 
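+ * + * A sketch of a full walk using these helpers (va and a present l4e assumed, + * present-bit checks between the steps elided): + * + *     l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va)); + *     l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va)); + *     l1_pgentry_t l1e = l1e_from_l2e(l2e, l1_table_offset(va)); + * + * with each step mapping and unmapping the intermediate table.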
*/ +#define l1e_from_l2e(l2e_, offset_) ({ \ + const l1_pgentry_t *l1t_ = map_l1t_from_l2e(l2e_); \ + l1_pgentry_t l1e_ = l1t_[offset_]; \ + unmap_domain_page(l1t_); \ + l1e_; }) + +#define l2e_from_l3e(l3e_, offset_) ({ \ + const l2_pgentry_t *l2t_ = map_l2t_from_l3e(l3e_); \ + l2_pgentry_t l2e_ = l2t_[offset_]; \ + unmap_domain_page(l2t_); \ + l2e_; }) + +#define l3e_from_l4e(l4e_, offset_) ({ \ + const l3_pgentry_t *l3t_ = map_l3t_from_l4e(l4e_); \ + l3_pgentry_t l3e_ = l3t_[offset_]; \ + unmap_domain_page(l3t_); \ + l3e_; }) + +/* Given a virtual address, get an entry offset into a page table. */ +#define l1_table_offset(a) \ + (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) +#define l2_table_offset(a) \ + (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) +#define l3_table_offset(a) \ + (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) +#define l4_table_offset(a) \ + (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) + +/* Convert a pointer to a page-table entry into pagetable slot index. */ +#define pgentry_ptr_to_slot(_p) \ + (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p))) + +#ifndef __ASSEMBLY__ + +/* Page-table type. */ +typedef struct { u64 pfn; } pagetable_t; +#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT) +#define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x)) +#define pagetable_get_pfn(x) ((x).pfn) +#define pagetable_get_mfn(x) _mfn(((x).pfn)) +#define pagetable_is_null(x) ((x).pfn == 0) +#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) }) +#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) }) +#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg)) +#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT) +#define pagetable_null() pagetable_from_pfn(0) + +void clear_page_sse2(void *); +void copy_page_sse2(void *, const void *); + +#define clear_page(_p) clear_page_sse2(_p) +#define copy_page(_t, _f) copy_page_sse2(_t, _f) + +/* Convert between Xen-heap virtual addresses and machine addresses. */ +#define __pa(x) (virt_to_maddr(x)) +#define __va(x) (maddr_to_virt(x)) + +/* Convert between Xen-heap virtual addresses and machine frame numbers. */ +#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) +#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT)) + +/* Convert between machine frame numbers and page-info structures. */ +#define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn)) +#define page_to_mfn(pg) pdx_to_mfn((unsigned long)((pg) - frame_table)) + +/* Convert between machine addresses and page-info structures. */ +#define __maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma)) +#define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg)) + +/* Convert between frame number and address formats. */ +#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT) +#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT)) +#define gfn_to_gaddr(gfn) __pfn_to_paddr(gfn_x(gfn)) +#define gaddr_to_gfn(ga) _gfn(__paddr_to_pfn(ga)) +#define mfn_to_maddr(mfn) __pfn_to_paddr(mfn_x(mfn)) +#define maddr_to_mfn(ma) _mfn(__paddr_to_pfn(ma)) + +/* + * We define non-underscored wrappers for above conversion functions. These are + * overridden in various source files while underscored versions remain intact. 
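+ * + * E.g. a source file wanting a type-safe variant typically does + * (illustrative): + * + *     #undef virt_to_mfn + *     #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))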
+ */ +#define mfn_valid(mfn) __mfn_valid(mfn_x(mfn)) +#define virt_to_mfn(va) __virt_to_mfn(va) +#define mfn_to_virt(mfn) __mfn_to_virt(mfn) +#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va)) +#define maddr_to_virt(ma) __maddr_to_virt((unsigned long)(ma)) +#define maddr_to_page(ma) __maddr_to_page(ma) +#define page_to_maddr(pg) __page_to_maddr(pg) +#define virt_to_page(va) __virt_to_page(va) +#define page_to_virt(pg) __page_to_virt(pg) +#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn) +#define paddr_to_pfn(pa) __paddr_to_pfn(pa) +#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa)) +#define vmap_to_mfn(va) xen_map_to_mfn((unsigned long)(va)) +#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) + +#endif /* !defined(__ASSEMBLY__) */ + +/* Where to find each level of the linear mapping */ +#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START)) +#define __linear_l2_table \ + ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START))) +#define __linear_l3_table \ + ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START))) +#define __linear_l4_table \ + ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START))) + + +#ifndef __ASSEMBLY__ +extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES]; +extern l2_pgentry_t *compat_idle_pg_table_l2; +extern unsigned int m2p_compat_vstart; +extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES], + l2_bootmap[4*L2_PAGETABLE_ENTRIES]; +extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES]; +extern l2_pgentry_t l2_directmap[4*L2_PAGETABLE_ENTRIES]; +extern l1_pgentry_t l1_fixmap[L1_PAGETABLE_ENTRIES]; +void paging_init(void); +void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t); +#endif /* !defined(__ASSEMBLY__) */ + +#define _PAGE_NONE _AC(0x000,U) +#define _PAGE_PRESENT _AC(0x001,U) +#define _PAGE_RW _AC(0x002,U) +#define _PAGE_USER _AC(0x004,U) +#define _PAGE_PWT _AC(0x008,U) +#define _PAGE_PCD _AC(0x010,U) +#define _PAGE_ACCESSED _AC(0x020,U) +#define _PAGE_DIRTY _AC(0x040,U) +#define _PAGE_PAT _AC(0x080,U) +#define _PAGE_PSE _AC(0x080,U) +#define _PAGE_GLOBAL _AC(0x100,U) +#define _PAGE_AVAIL0 _AC(0x200,U) +#define _PAGE_AVAIL1 _AC(0x400,U) +#define _PAGE_AVAIL2 _AC(0x800,U) +#define _PAGE_AVAIL _AC(0xE00,U) +#define _PAGE_PSE_PAT _AC(0x1000,U) +#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12) + +#ifndef __ASSEMBLY__ +/* Dependency on NX being available can't be expressed. */ +#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0) +#endif + +#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) + +/* + * Debug option: Ensure that granted mappings are not implicitly unmapped. + * WARNING: This will need to be disabled to run OSes that use the spare PTE + * bits themselves (e.g., *BSD). + */ +#ifdef NDEBUG +#undef _PAGE_GNTTAB +#endif +#ifndef _PAGE_GNTTAB +#define _PAGE_GNTTAB 0 +#endif + +#define __PAGE_HYPERVISOR_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX) +#define __PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RO | \ + _PAGE_DIRTY | _PAGE_RW) +#define __PAGE_HYPERVISOR_RX (_PAGE_PRESENT | _PAGE_ACCESSED) +#define __PAGE_HYPERVISOR (__PAGE_HYPERVISOR_RX | \ + _PAGE_DIRTY | _PAGE_RW) +#define __PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR | _PAGE_PCD) +#define __PAGE_HYPERVISOR_UC (__PAGE_HYPERVISOR | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_HYPERVISOR_SHSTK (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY) + +#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages mappings */ + +#ifndef __ASSEMBLY__ + +/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. 
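+ * + * That is, cacheattr = (PAT << 2) | (PCD << 1) | PWT; for example + * _PAGE_PCD | _PAGE_PWT yields cacheattr 3, which XEN_MSR_PAT programs + * as UC.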
*/ +static inline unsigned int pte_flags_to_cacheattr(unsigned int flags) +{ + return ((flags >> 5) & 4) | ((flags >> 3) & 3); +} +static inline unsigned int cacheattr_to_pte_flags(unsigned int cacheattr) +{ + return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3); +} + +/* return true if permission increased */ +static inline bool_t +perms_strictly_increased(uint32_t old_flags, uint32_t new_flags) +/* Given the flags of two entries, are the new flags a strict + * increase in rights over the old ones? */ +{ + uint32_t of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT); + uint32_t nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT); + /* Flip the NX bit, since it's the only one that decreases rights; + * we calculate as if it were an "X" bit. */ + of ^= _PAGE_NX_BIT; + nf ^= _PAGE_NX_BIT; + /* If the changed bits are all set in the new flags, then rights strictly + * increased between old and new. */ + return ((of | (of ^ nf)) == nf); +} + +static inline void invalidate_icache(void) +{ +/* + * There is nothing to be done here as icaches are sufficiently + * coherent on x86. + */ +} + +#endif /* !__ASSEMBLY__ */ + +#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) + +#endif /* __X86_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/paging.h b/xen/arch/x86/include/asm/paging.h new file mode 100644 index 0000000000..308f1115dd --- /dev/null +++ b/xen/arch/x86/include/asm/paging.h @@ -0,0 +1,433 @@ +/****************************************************************************** + * include/asm-x86/paging.h + * + * Common interface for paging support + * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) + * Parts of this code are Copyright (c) 2006 by XenSource Inc. + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _XEN_PAGING_H +#define _XEN_PAGING_H + +#include +#include +#include +#include +#include +#include +#include + +/***************************************************************************** + * Macros to tell which paging mode a domain is in */ + +#define PG_SH_shift 20 +#define PG_HAP_shift 21 +#define PG_SHF_shift 22 +/* We're in one of the shadow modes */ +#ifdef CONFIG_SHADOW_PAGING +#define PG_SH_enable (1U << PG_SH_shift) +#define PG_SH_forced (1U << PG_SHF_shift) +#else +#define PG_SH_enable 0 +#define PG_SH_forced 0 +#endif +#ifdef CONFIG_HVM +#define PG_HAP_enable (1U << PG_HAP_shift) +#else +#define PG_HAP_enable 0 +#endif + +/* common paging mode bits */ +#define PG_mode_shift 10 +#ifdef CONFIG_HVM +/* Refcounts based on shadow tables instead of guest tables */ +#define PG_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << PG_mode_shift) +/* Xen does p2m translation, not guest */ +#define PG_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << PG_mode_shift) +/* Xen does not steal address space from the domain for its own booking; + * requires VT or similar mechanisms */ +#define PG_external (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << PG_mode_shift) +#else +#define PG_refcounts 0 +#define PG_translate 0 +#define PG_external 0 +#endif +#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE) +/* Enable log dirty mode */ +#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift) +#else +#define PG_log_dirty 0 +#endif + +/* All paging modes. */ +#define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external) + +#define paging_mode_enabled(_d) (!!(_d)->arch.paging.mode) +#define paging_mode_shadow(_d) (!!((_d)->arch.paging.mode & PG_SH_enable)) +#define paging_mode_sh_forced(_d) (!!((_d)->arch.paging.mode & PG_SH_forced)) +#define paging_mode_hap(_d) (!!((_d)->arch.paging.mode & PG_HAP_enable)) + +#define paging_mode_refcounts(_d) (!!((_d)->arch.paging.mode & PG_refcounts)) +#define paging_mode_log_dirty(_d) (!!((_d)->arch.paging.mode & PG_log_dirty)) +#define paging_mode_translate(_d) (!!((_d)->arch.paging.mode & PG_translate)) +#define paging_mode_external(_d) (!!((_d)->arch.paging.mode & PG_external)) + +/* flags used for paging debug */ +#define PAGING_DEBUG_LOGDIRTY 0 + +/***************************************************************************** + * Mode-specific entry points into the shadow code. + * + * These shouldn't be used directly by callers; rather use the functions + * below which will indirect through this table as appropriate. 
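+ * + * For example, paging_write_guest_entry() further down dispatches to + * shadow.write_guest_entry for domains in a shadow mode and falls back to + * a plain write_atomic() otherwise.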
*/ + +struct shadow_paging_mode { +#ifdef CONFIG_SHADOW_PAGING + void (*detach_old_tables )(struct vcpu *v); +#ifdef CONFIG_PV + void (*write_guest_entry )(struct vcpu *v, intpte_t *p, + intpte_t new, mfn_t gmfn); + intpte_t (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p, + intpte_t old, intpte_t new, + mfn_t gmfn); +#endif +#ifdef CONFIG_HVM + int (*guess_wrmap )(struct vcpu *v, + unsigned long vaddr, mfn_t gmfn); + void (*pagetable_dying )(paddr_t gpa); + void (*trace_emul_write_val )(const void *ptr, unsigned long vaddr, + const void *src, unsigned int bytes); +#endif +#endif + /* For outsiders to tell what mode we're in */ + unsigned int shadow_levels; +}; + + +/************************************************/ +/* common paging interface */ +/************************************************/ +struct paging_mode { + int (*page_fault )(struct vcpu *v, unsigned long va, + struct cpu_user_regs *regs); + bool (*invlpg )(struct vcpu *v, + unsigned long linear); +#ifdef CONFIG_HVM + unsigned long (*gva_to_gfn )(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long va, + uint32_t *pfec); + unsigned long (*p2m_ga_to_gfn )(struct vcpu *v, + struct p2m_domain *p2m, + unsigned long cr3, + paddr_t ga, uint32_t *pfec, + unsigned int *page_order); +#endif + void (*update_cr3 )(struct vcpu *v, int do_locking, + bool noflush); + void (*update_paging_modes )(struct vcpu *v); + bool (*flush_tlb )(const unsigned long *vcpu_bitmap); + + unsigned int guest_levels; + + /* paging support extension */ + struct shadow_paging_mode shadow; +}; + +/***************************************************************************** + * Log dirty code */ + +#if PG_log_dirty + +/* get the dirty bitmap for a specific range of pfns */ +void paging_log_dirty_range(struct domain *d, + unsigned long begin_pfn, + unsigned long nr, + uint8_t *dirty_bitmap); + +/* enable log dirty */ +int paging_log_dirty_enable(struct domain *d, bool log_global); + +/* log dirty initialization */ +void paging_log_dirty_init(struct domain *d, const struct log_dirty_ops *ops); + +/* mark a page as dirty */ +void paging_mark_dirty(struct domain *d, mfn_t gmfn); +/* mark a page as dirty with taking guest pfn as parameter */ +void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn); + +/* is this guest page dirty? + * This is called from inside paging code, with the paging lock held. */ +int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn); + +/* + * Log-dirty radix tree indexing: + * All tree nodes are PAGE_SIZE bytes, mapped on-demand. + * Leaf nodes are simple bitmaps; 1 bit per guest pfn. + * Interior nodes are arrays of LOGDIRTY_NODE_ENTRIES mfns. + * TODO: Dynamic radix tree height. Most guests will only need 2 levels. + * The fourth level is basically unusable on 32-bit Xen. + * TODO2: Abstract out the radix-tree mechanics? 
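+ * + * Worked example: with 4K pages a leaf node holds PAGE_SIZE * 8 = 32768 + * bits, so L1_LOGDIRTY_IDX() below takes pfn bits 0-14, L2_LOGDIRTY_IDX() + * the next PAGETABLE_ORDER (9) bits, and so on up the tree.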
+ */ +#define LOGDIRTY_NODE_ENTRIES (1 << PAGETABLE_ORDER) +#define L1_LOGDIRTY_IDX(pfn) (pfn_x(pfn) & ((1 << (PAGE_SHIFT + 3)) - 1)) +#define L2_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3)) & \ + (LOGDIRTY_NODE_ENTRIES-1)) +#define L3_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER)) & \ + (LOGDIRTY_NODE_ENTRIES-1)) +#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \ + (LOGDIRTY_NODE_ENTRIES-1)) + +#ifdef CONFIG_HVM +/* VRAM dirty tracking support */ +struct sh_dirty_vram { + unsigned long begin_pfn; + unsigned long end_pfn; +#ifdef CONFIG_SHADOW_PAGING + paddr_t *sl1ma; + uint8_t *dirty_bitmap; + s_time_t last_dirty; +#endif +}; +#endif + +#else /* !PG_log_dirty */ + +static inline void paging_log_dirty_init(struct domain *d, + const struct log_dirty_ops *ops) {} +static inline void paging_mark_dirty(struct domain *d, mfn_t gmfn) {} +static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) {} +static inline bool paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { return false; } + +#endif /* PG_log_dirty */ + +/***************************************************************************** + * Entry points into the paging-assistance code */ + +/* Initialize the paging resource for vcpu struct. It is called by + * vcpu_initialise() in domain.c */ +void paging_vcpu_init(struct vcpu *v); + +/* Set up the paging-assistance-specific parts of a domain struct at + * start of day. Called for every domain from arch_domain_create() */ +int paging_domain_init(struct domain *d); + +/* Handler for paging-control ops: operations from user-space to enable + * and disable ephemeral shadow modes (test mode and log-dirty mode) and + * manipulate the log-dirty bitmap. */ +int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl, + bool_t resuming); + +/* Helper hypercall for dealing with continuations. */ +long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); + +/* Call when destroying a vcpu/domain */ +void paging_vcpu_teardown(struct vcpu *v); +int paging_teardown(struct domain *d); + +/* Call once all of the references to the domain have gone away */ +void paging_final_teardown(struct domain *d); + +/* Enable an arbitrary paging-assistance mode. Call once at domain + * creation. */ +int paging_enable(struct domain *d, u32 mode); + +#define paging_get_hostmode(v) ((v)->arch.paging.mode) +#define paging_get_nestedmode(v) ((v)->arch.paging.nestedmode) +const struct paging_mode *paging_get_mode(struct vcpu *v); +void paging_update_nestedmode(struct vcpu *v); + +/* Page fault handler + * Called from pagefault handler in Xen, and from the HVM trap handlers + * for pagefaults. Returns 1 if this fault was an artefact of the + * paging code (and the guest should retry) or 0 if it is not (and the + * fault should be handled elsewhere or passed to the guest). + * + * Note: under shadow paging, this function handles all page faults; + * however, for hardware-assisted paging, this function handles only + * host page faults (i.e. nested page faults). */ +static inline int +paging_fault(unsigned long va, struct cpu_user_regs *regs) +{ + struct vcpu *v = current; + return paging_get_hostmode(v)->page_fault(v, va, regs); +} + +/* Handle invlpg requests on vcpus. */ +void paging_invlpg(struct vcpu *v, unsigned long va); + +/* + * Translate a guest virtual address to the frame number that the + * *guest* pagetables would map it to. 
Returns INVALID_GFN if the guest + * tables don't map this address for this kind of access. + * *pfec is used to determine which kind of access this is when + * walking the tables. The caller should set the PFEC_page_present bit + * in *pfec; in the failure case, that bit will be cleared if appropriate. + * + * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS: + * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled. + */ +unsigned long paging_gva_to_gfn(struct vcpu *v, + unsigned long va, + uint32_t *pfec); + +#ifdef CONFIG_HVM + +/* Translate a guest address using a particular CR3 value. This is used + * by nested HAP code, to walk the guest-supplied NPT tables as if + * they were pagetables. + * Use 'paddr_t' for the guest address so it won't overflow when + * l1 or l2 guest is in 32bit PAE mode. + * If the GFN returned is not INVALID_GFN, *page_order gives + * the size of the superpage (if any) it was found in. */ +static inline unsigned long paging_ga_to_gfn_cr3(struct vcpu *v, + unsigned long cr3, + paddr_t ga, + uint32_t *pfec, + unsigned int *page_order) +{ + struct p2m_domain *p2m = v->domain->arch.p2m; + return paging_get_hostmode(v)->p2m_ga_to_gfn(v, p2m, cr3, ga, pfec, + page_order); +} + +#endif /* CONFIG_HVM */ + +/* Update all the things that are derived from the guest's CR3. + * Called when the guest changes CR3; the caller can then use v->arch.cr3 + * as the value to load into the host CR3 to schedule this vcpu */ +static inline void paging_update_cr3(struct vcpu *v, bool noflush) +{ + paging_get_hostmode(v)->update_cr3(v, 1, noflush); +} + +/* Update all the things that are derived from the guest's CR0/CR3/CR4. + * Called to initialize paging structures if the paging mode + * has changed, and when bringing up a VCPU for the first time. */ +static inline void paging_update_paging_modes(struct vcpu *v) +{ + paging_get_hostmode(v)->update_paging_modes(v); +} + +#ifdef CONFIG_PV + +/* + * Write a new value into the guest pagetable, and update the + * paging-assistance state appropriately. + */ +static inline void paging_write_guest_entry( + struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn) +{ +#ifdef CONFIG_SHADOW_PAGING + if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) ) + paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new, gmfn); + else +#endif + write_atomic(p, new); +} + + +/* + * Cmpxchg a new value into the guest pagetable, and update the + * paging-assistance state appropriately. Returns the value previously + * present in the entry; the caller should compare it with "old" to see if + * the cmpxchg itself was successful. + */ +static inline intpte_t paging_cmpxchg_guest_entry( + struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn) +{ +#ifdef CONFIG_SHADOW_PAGING + if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) ) + return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old, + new, gmfn); +#endif + return cmpxchg(p, old, new); +} + +#endif /* CONFIG_PV */ + +/* Helper function that writes a pte in such a way that a concurrent read + * never sees a half-written entry that has _PAGE_PRESENT set */ +static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new) +{ + *p = new; +} + +/* + * Called from the guest to indicate that a process is being + * torn down and its pagetables will soon be discarded. 
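+ * (Typically reached via the HVMOP_pagetable_dying hypercall.)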
+ */ +void pagetable_dying(paddr_t gpa); + +/* Print paging-assistance info to the console */ +void paging_dump_domain_info(struct domain *d); +void paging_dump_vcpu_info(struct vcpu *v); + +/* Set the pool of shadow pages to the required number of pages. + * Input might be rounded up to at minimum amount of pages, plus + * space for the p2m table. + * Returns 0 for success, non-zero for failure. */ +int paging_set_allocation(struct domain *d, unsigned int pages, + bool *preempted); + +/* Is gfn within maxphysaddr for the domain? */ +static inline bool gfn_valid(const struct domain *d, gfn_t gfn) +{ + return !(gfn_x(gfn) >> (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT)); +} + +/* Maxphysaddr supportable by the paging infrastructure. */ +static always_inline unsigned int paging_max_paddr_bits(const struct domain *d) +{ + unsigned int bits = paging_mode_hap(d) ? hap_paddr_bits : paddr_bits; + + if ( paging_mode_external(d) ) + { + if ( !IS_ENABLED(CONFIG_BIGMEM) && paging_mode_shadow(d) ) + { + /* Shadowed superpages store GFNs in 32-bit page_info fields. */ + bits = min(bits, 32U + PAGE_SHIFT); + } + else + { + /* Both p2m-ept and p2m-pt only support 4-level page tables. */ + bits = min(bits, 48U); + } + } + + return bits; +} + +/* Flush selected vCPUs TLBs. NULL for all. */ +static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap) +{ + return paging_get_hostmode(current)->flush_tlb(vcpu_bitmap); +} + +#endif /* XEN_PAGING_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/pci.h b/xen/arch/x86/include/asm/pci.h new file mode 100644 index 0000000000..443f25347d --- /dev/null +++ b/xen/arch/x86/include/asm/pci.h @@ -0,0 +1,41 @@ +#ifndef __X86_PCI_H__ +#define __X86_PCI_H__ + +#define CF8_BDF(cf8) ( ((cf8) & 0x00ffff00) >> 8) +#define CF8_ADDR_LO(cf8) ( (cf8) & 0x000000fc) +#define CF8_ADDR_HI(cf8) ( ((cf8) & 0x0f000000) >> 16) +#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000)) + +#define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 \ + || id == 0x01268086 || id == 0x01028086 \ + || id == 0x01128086 || id == 0x01228086 \ + || id == 0x010A8086 ) + +struct arch_pci_dev { + vmask_t used_vectors; +}; + +int pci_conf_write_intercept(unsigned int seg, unsigned int bdf, + unsigned int reg, unsigned int size, + uint32_t *data); +int pci_msi_conf_write_intercept(struct pci_dev *, unsigned int reg, + unsigned int size, uint32_t *data); +bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg, + unsigned int *bdf); + +bool_t pci_ro_mmcfg_decode(unsigned long mfn, unsigned int *seg, + unsigned int *bdf); + +/* MMCFG external variable defines */ +extern int pci_mmcfg_config_num; +extern struct acpi_mcfg_allocation *pci_mmcfg_config; + +/* Unlike ARM, PCI passthrough is always enabled for x86. 
*/ +static always_inline bool is_pci_passthrough_enabled(void) +{ + return true; +} + +static inline void arch_pci_init_pdev(struct pci_dev *pdev) {} + +#endif /* __X86_PCI_H__ */ diff --git a/xen/arch/x86/include/asm/percpu.h b/xen/arch/x86/include/asm/percpu.h new file mode 100644 index 0000000000..2b0c29a233 --- /dev/null +++ b/xen/arch/x86/include/asm/percpu.h @@ -0,0 +1,22 @@ +#ifndef __X86_PERCPU_H__ +#define __X86_PERCPU_H__ + +#ifndef __ASSEMBLY__ +extern char __per_cpu_start[], __per_cpu_data_end[]; +extern unsigned long __per_cpu_offset[NR_CPUS]; +void percpu_init_areas(void); +#endif + +/* var is in discarded region: offset to particular copy we want */ +#define per_cpu(var, cpu) \ + (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) +#define this_cpu(var) \ + (*RELOC_HIDE(&per_cpu__##var, get_cpu_info()->per_cpu_offset)) + +#define this_cpu_ptr(var) \ + (*RELOC_HIDE(var, get_cpu_info()->per_cpu_offset)) + +#define per_cpu_ptr(var, cpu) \ + (*RELOC_HIDE(var, __per_cpu_offset[cpu])) + +#endif /* __X86_PERCPU_H__ */ diff --git a/xen/arch/x86/include/asm/perfc.h b/xen/arch/x86/include/asm/perfc.h new file mode 100644 index 0000000000..a1a591e803 --- /dev/null +++ b/xen/arch/x86/include/asm/perfc.h @@ -0,0 +1,12 @@ +#ifndef __ASM_PERFC_H__ +#define __ASM_PERFC_H__ + +static inline void arch_perfc_reset(void) +{ +} + +static inline void arch_perfc_gather(void) +{ +} + +#endif diff --git a/xen/arch/x86/include/asm/perfc_defn.h b/xen/arch/x86/include/asm/perfc_defn.h new file mode 100644 index 0000000000..1a9ea3f89e --- /dev/null +++ b/xen/arch/x86/include/asm/perfc_defn.h @@ -0,0 +1,120 @@ +/* This file is legitimately included multiple times. */ +/*#ifndef __XEN_PERFC_DEFN_H__*/ +/*#define __XEN_PERFC_DEFN_H__*/ + +PERFCOUNTER_ARRAY(exceptions, "exceptions", 32) + +#define VMX_PERF_EXIT_REASON_SIZE 56 +#define VMX_PERF_VECTOR_SIZE 0x20 +PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE) +PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE) + +#define VMEXIT_NPF_PERFC 141 +#define SVM_PERF_EXIT_REASON_SIZE (1+141) +PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE) + +PERFCOUNTER(seg_fixups, "segmentation fixups") + +PERFCOUNTER(apic_timer, "apic timer interrupts") + +PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes") + +PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op") +PERFCOUNTER(num_mmuext_ops, "mmuext ops") +PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update") +PERFCOUNTER(num_page_updates, "page updates") +PERFCOUNTER(writable_mmu_updates, "mmu_updates of writable pages") +PERFCOUNTER(calls_to_update_va, "calls to update_va_map") +PERFCOUNTER(page_faults, "page faults") +PERFCOUNTER(copy_user_faults, "copy_user faults") + +PERFCOUNTER(map_domain_page_count, "map_domain_page count") +PERFCOUNTER(ptwr_emulations, "writable pt emulations") +PERFCOUNTER(mmio_ro_emulations, "mmio ro emulations") + +PERFCOUNTER(exception_fixed, "pre-exception fixed") + +PERFCOUNTER(guest_walk, "guest pagetable walks") + +/* Shadow counters */ +PERFCOUNTER(shadow_alloc, "calls to shadow_alloc") +PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs") + +/* STATUS counters do not reset when 'P' is hit */ +PERFSTATUS(shadow_alloc_count, "number of shadow pages in use") +PERFCOUNTER(shadow_free, "calls to shadow_free") +PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows") +PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows") +PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map") 
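+/* The counters above and below are bumped at their use sites via + * perfc_incr(), e.g. perfc_incr(shadow_fault) on entry to the shadow fault + * handler (illustrative). */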
+PERFCOUNTER(shadow_a_update, "shadow A bit update") +PERFCOUNTER(shadow_ad_update, "shadow A&D bit update") +PERFCOUNTER(shadow_fault, "calls to shadow_fault") +PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p") +PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio") +PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error") +PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn") +PERFCOUNTER(shadow_fault_bail_real_fault, + "shadow_fault really guest fault") +PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read") +PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write") +PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails") +PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write") +PERFCOUNTER(shadow_fault_emulate_wp, "shadow_fault emulate for CR0.WP=0") +PERFCOUNTER(shadow_fault_fast_emulate, "shadow_fault fast emulate") +PERFCOUNTER(shadow_fault_fast_emulate_fail, + "shadow_fault fast emulate failed") +PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio") +PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault") +PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate") +PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e") +PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e") +PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e") +PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e") +PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup") +PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head") +PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses") +PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status") +PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert") +PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete") +PERFCOUNTER(shadow_writeable, "shadow removes write access") +PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3") +PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3") +PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3") +PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: linux low/solaris") +PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: linux high") +PERFCOUNTER(shadow_writeable_h_6, "shadow writeable: FreeBSD") +PERFCOUNTER(shadow_writeable_h_7, "shadow writeable: sl1p") +PERFCOUNTER(shadow_writeable_h_8, "shadow writeable: sl1p failed") +PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force") +PERFCOUNTER(shadow_writeable_bf_1, "shadow writeable resync bf") +PERFCOUNTER(shadow_mappings, "shadow removes all mappings") +PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force") +PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit") +PERFCOUNTER(shadow_unshadow, "shadow unshadows a page") +PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer") +PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force") +PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed") +PERFCOUNTER(shadow_check_gwalk, "shadow checks gwalk") +PERFCOUNTER(shadow_inconsistent_gwalk, "shadow check inconsistent gwalk") +PERFCOUNTER(shadow_rm_write_flush_tlb, + "shadow flush tlb by removing write perm") + +PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg") +PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults") + +PERFCOUNTER(shadow_em_ex_pt, "shadow extra pt write") +PERFCOUNTER(shadow_em_ex_non_pt, "shadow extra non-pt-write op") 
+PERFCOUNTER(shadow_em_ex_fail, "shadow extra emulation failed") + +PERFCOUNTER(shadow_oos_fixup_add, "shadow OOS fixup adds") +PERFCOUNTER(shadow_oos_fixup_evict,"shadow OOS fixup evictions") +PERFCOUNTER(shadow_unsync, "shadow OOS unsyncs") +PERFCOUNTER(shadow_unsync_evict, "shadow OOS evictions") +PERFCOUNTER(shadow_resync, "shadow OOS resyncs") + +PERFCOUNTER(realmode_emulations, "realmode instructions emulated") +PERFCOUNTER(realmode_exits, "vmexits from realmode") + +PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection") + +/*#endif*/ /* __XEN_PERFC_DEFN_H__ */ diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h new file mode 100644 index 0000000000..400b4fac5e --- /dev/null +++ b/xen/arch/x86/include/asm/processor.h @@ -0,0 +1,650 @@ + +/* Portions are: Copyright (c) 1994 Linus Torvalds */ + +#ifndef __ASM_X86_PROCESSOR_H +#define __ASM_X86_PROCESSOR_H + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#endif + +#include +#include + +/* + * Trap/fault mnemonics. + */ +#define TRAP_divide_error 0 +#define TRAP_debug 1 +#define TRAP_nmi 2 +#define TRAP_int3 3 +#define TRAP_overflow 4 +#define TRAP_bounds 5 +#define TRAP_invalid_op 6 +#define TRAP_no_device 7 +#define TRAP_double_fault 8 +#define TRAP_copro_seg 9 +#define TRAP_invalid_tss 10 +#define TRAP_no_segment 11 +#define TRAP_stack_error 12 +#define TRAP_gp_fault 13 +#define TRAP_page_fault 14 +#define TRAP_spurious_int 15 +#define TRAP_copro_error 16 +#define TRAP_alignment_check 17 +#define TRAP_machine_check 18 +#define TRAP_simd_error 19 +#define TRAP_virtualisation 20 +#define TRAP_nr 32 + +#define TRAP_HAVE_EC X86_EXC_HAVE_EC + +/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */ +/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */ +#define TRAP_syscall 256 + +/* Boolean return code: the reason for a fault has been fixed. */ +#define EXCRET_fault_fixed 1 + +/* 'trap_bounce' flags values */ +#define TBF_EXCEPTION 1 +#define TBF_EXCEPTION_ERRCODE 2 +#define TBF_INTERRUPT 8 + +/* 'arch_vcpu' flags values */ +#define _TF_kernel_mode 0 +#define TF_kernel_mode (1<<_TF_kernel_mode) + +/* #PF error code values. */ +#define PFEC_page_present (_AC(1,U) << 0) +#define PFEC_write_access (_AC(1,U) << 1) +#define PFEC_user_mode (_AC(1,U) << 2) +#define PFEC_reserved_bit (_AC(1,U) << 3) +#define PFEC_insn_fetch (_AC(1,U) << 4) +#define PFEC_prot_key (_AC(1,U) << 5) +#define PFEC_shstk (_AC(1,U) << 6) +#define PFEC_arch_mask (_AC(0xffff,U)) /* Architectural PFEC values. */ +/* Internally used only flags. */ +#define PFEC_page_paged (1U<<16) +#define PFEC_page_shared (1U<<17) +#define PFEC_implicit (1U<<18) /* Pagewalk input for ldt/gdt/idt/tr accesses. */ +#define PFEC_synth_mask (~PFEC_arch_mask) /* Synthetic PFEC values. */ + +/* Other exception error code values. */ +#define X86_XEC_EXT (_AC(1,U) << 0) +#define X86_XEC_IDT (_AC(1,U) << 1) +#define X86_XEC_TI (_AC(1,U) << 2) + +#define XEN_MINIMAL_CR4 (X86_CR4_PGE | X86_CR4_PAE) + +#define XEN_CR4_PV32_BITS (X86_CR4_SMEP|X86_CR4_SMAP) + +/* Common SYSCALL parameters. */ +#define XEN_MSR_STAR (((uint64_t)FLAT_RING3_CS32 << 48) | \ + ((uint64_t)__HYPERVISOR_CS << 32)) +#define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF| \ + X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF| \ + X86_EFLAGS_TF) + +/* + * Host IA32_CR_PAT value to cover all memory types. This is not the default + * MSR_PAT value, and is an ABI with PV guests. 
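+ * + * Decoded (PA0 in the low byte): WB, WT, UC-, UC, WC, WP, UC, UC.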
+ */ +#define XEN_MSR_PAT _AC(0x050100070406, ULL) + +#ifndef __ASSEMBLY__ + +struct domain; +struct vcpu; + +struct x86_cpu_id { + uint16_t vendor; + uint16_t family; + uint16_t model; + uint16_t feature; /* bit index */ + const void *driver_data; +}; + +struct cpuinfo_x86 { + __u8 x86; /* CPU family */ + __u8 x86_vendor; /* CPU vendor */ + __u8 x86_model; + __u8 x86_mask; + int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ + __u32 extended_cpuid_level; /* Maximum supported CPUID extended level */ + unsigned int x86_capability[NCAPINTS]; + char x86_vendor_id[16]; + char x86_model_id[64]; + int x86_cache_size; /* in KB - valid for CPUS which support this call */ + int x86_cache_alignment; /* In bytes */ + __u32 x86_max_cores; /* cpuid returned max cores value */ + __u32 booted_cores; /* number of cores as seen by OS */ + __u32 x86_num_siblings; /* cpuid logical cpus per chip value */ + __u32 apicid; + __u32 phys_proc_id; /* package ID of each logical CPU */ + __u32 cpu_core_id; /* core ID of each logical CPU*/ + __u32 compute_unit_id; /* AMD compute unit ID of each logical CPU */ + unsigned short x86_clflush_size; +} __cacheline_aligned; + +/* + * capabilities of CPUs + */ + +extern struct cpuinfo_x86 boot_cpu_data; + +extern struct cpuinfo_x86 cpu_data[]; +#define current_cpu_data cpu_data[smp_processor_id()] + +extern bool probe_cpuid_faulting(void); +extern void ctxt_switch_levelling(const struct vcpu *next); +extern void (*ctxt_switch_masking)(const struct vcpu *next); + +extern bool_t opt_cpu_info; +extern u32 trampoline_efer; +extern u64 trampoline_misc_enable_off; + +/* Maximum width of physical addresses supported by the hardware. */ +extern unsigned int paddr_bits; +/* Max physical address width supported within HAP guests. */ +extern unsigned int hap_paddr_bits; +/* Maximum width of virtual addresses supported by the hardware. */ +extern unsigned int vaddr_bits; + +extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]); + +extern void identify_cpu(struct cpuinfo_x86 *); +extern void setup_clear_cpu_cap(unsigned int); +extern void setup_force_cpu_cap(unsigned int); +extern bool is_forced_cpu_cap(unsigned int); +extern void print_cpu_info(unsigned int cpu); +extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); + +#define cpu_to_core(_cpu) (cpu_data[_cpu].cpu_core_id) +#define cpu_to_socket(_cpu) (cpu_data[_cpu].phys_proc_id) + +unsigned int apicid_to_socket(unsigned int); + +static inline int cpu_nr_siblings(unsigned int cpu) +{ + return cpu_data[cpu].x86_num_siblings; +} + +/* + * Generic CPUID function + * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx + * resulting in stale register contents being returned. 
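+ * + * Illustrative use (locals assumed): unsigned int eax, ebx, ecx, edx; + * cpuid(1, &eax, &ebx, &ecx, &edx) reads leaf 1 into eax-edx.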
+ */ +#define cpuid(_op,_eax,_ebx,_ecx,_edx) \ + asm volatile ( "cpuid" \ + : "=a" (*(int *)(_eax)), \ + "=b" (*(int *)(_ebx)), \ + "=c" (*(int *)(_ecx)), \ + "=d" (*(int *)(_edx)) \ + : "0" (_op), "2" (0) ) + +/* Some CPUID calls want 'count' to be placed in ecx */ +static inline void cpuid_count( + unsigned int op, + unsigned int count, + unsigned int *eax, + unsigned int *ebx, + unsigned int *ecx, + unsigned int *edx) +{ + asm volatile ( "cpuid" + : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) + : "0" (op), "c" (count) ); +} + +/* + * CPUID functions returning a single datum + */ +static always_inline unsigned int cpuid_eax(unsigned int op) +{ + unsigned int eax; + + asm volatile ( "cpuid" + : "=a" (eax) + : "0" (op) + : "bx", "cx", "dx" ); + return eax; +} + +static always_inline unsigned int cpuid_ebx(unsigned int op) +{ + unsigned int eax, ebx; + + asm volatile ( "cpuid" + : "=a" (eax), "=b" (ebx) + : "0" (op) + : "cx", "dx" ); + return ebx; +} + +static always_inline unsigned int cpuid_ecx(unsigned int op) +{ + unsigned int eax, ecx; + + asm volatile ( "cpuid" + : "=a" (eax), "=c" (ecx) + : "0" (op) + : "bx", "dx" ); + return ecx; +} + +static always_inline unsigned int cpuid_edx(unsigned int op) +{ + unsigned int eax, edx; + + asm volatile ( "cpuid" + : "=a" (eax), "=d" (edx) + : "0" (op) + : "bx", "cx" ); + return edx; +} + +static always_inline unsigned int cpuid_count_ebx( + unsigned int leaf, unsigned int subleaf) +{ + unsigned int ebx, tmp; + + cpuid_count(leaf, subleaf, &tmp, &ebx, &tmp, &tmp); + + return ebx; +} + +static always_inline unsigned int cpuid_count_edx( + unsigned int leaf, unsigned int subleaf) +{ + unsigned int edx, tmp; + + cpuid_count(leaf, subleaf, &tmp, &tmp, &tmp, &edx); + + return edx; +} + +static inline unsigned long read_cr0(void) +{ + unsigned long cr0; + asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) ); + return cr0; +} + +static inline void write_cr0(unsigned long val) +{ + asm volatile ( "mov %0,%%cr0" : : "r" ((unsigned long)val) ); +} + +static inline unsigned long read_cr2(void) +{ + unsigned long cr2; + asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) ); + return cr2; +} + +static inline void write_cr3(unsigned long val) +{ + asm volatile ( "mov %0, %%cr3" : : "r" (val) : "memory" ); +} + +static inline unsigned long cr3_pa(unsigned long cr3) +{ + return cr3 & X86_CR3_ADDR_MASK; +} + +static inline unsigned int cr3_pcid(unsigned long cr3) +{ + return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0; +} + +static inline unsigned long read_cr4(void) +{ + return get_cpu_info()->cr4; +} + +static inline void write_cr4(unsigned long val) +{ + struct cpu_info *info = get_cpu_info(); + +#ifdef CONFIG_PV + /* No global pages in case of PCIDs enabled! */ + ASSERT(!(val & X86_CR4_PGE) || !(val & X86_CR4_PCIDE)); +#else + ASSERT(!(val & X86_CR4_PCIDE)); +#endif + + /* + * On hardware supporting FSGSBASE, the value in %cr4 is the kernel's + * choice for 64bit PV guests, which impacts whether Xen can use the + * instructions. + * + * The {rd,wr}{fs,gs}base() helpers use info->cr4 to work out whether it + * is safe to execute the {RD,WR}{FS,GS}BASE instruction, falling back to + * the MSR path if not. Some users require interrupt safety. + * + * If FSGSBASE is currently or about to become clear, reflect this in + * info->cr4 before updating %cr4, so an interrupt which hits in the + * middle won't observe FSGSBASE set in info->cr4 but clear in %cr4. 
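+ * + * Concretely, for the expression below: if val clears FSGSBASE, then + * val & (info->cr4 | ~X86_CR4_FSGSBASE) already has FSGSBASE clear, so the + * bit is dropped from info->cr4 before the %cr4 write. If val sets FSGSBASE, + * the bit only becomes visible in info->cr4 via the second store, after + * %cr4 has been updated. All other bits simply take val's value in both + * stores.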
*/ + info->cr4 = val & (info->cr4 | ~X86_CR4_FSGSBASE); + + asm volatile ( "mov %[val], %%cr4" + : "+m" (info->cr4) /* Force ordering without a barrier. */ + : [val] "r" (val) ); + + info->cr4 = val; +} + +/* Clear and set 'TS' bit respectively */ +static inline void clts(void) +{ + asm volatile ( "clts" ); +} + +static inline void stts(void) +{ + write_cr0(X86_CR0_TS|read_cr0()); +} + +/* + * Save the cr4 feature set we're using (i.e. + * Pentium 4MB enable and PPro Global page + * enable), so that any CPUs that boot up + * after us can get the correct flags. + */ +extern unsigned long mmu_cr4_features; + +static always_inline void set_in_cr4 (unsigned long mask) +{ + mmu_cr4_features |= mask; + write_cr4(read_cr4() | mask); +} + +static inline unsigned int rdpkru(void) +{ + unsigned int pkru; + + asm volatile (".byte 0x0f,0x01,0xee" + : "=a" (pkru) : "c" (0) : "dx"); + + return pkru; +} + +static inline void wrpkru(unsigned int pkru) +{ + asm volatile ( ".byte 0x0f, 0x01, 0xef" + :: "a" (pkru), "d" (0), "c" (0) ); +} + +/* Macros for PKRU domain */ +#define PKRU_READ (0) +#define PKRU_WRITE (1) +#define PKRU_ATTRS (2) + +/* + * PKRU defines 32 bits, there are 16 domains and 2 attribute bits per + * domain in pkru, pkeys is index to a defined domain, so the value of + * pte_pkeys * PKRU_ATTRS + R/W is offset of a defined domain attribute. + */ +static inline bool_t read_pkru_ad(uint32_t pkru, unsigned int pkey) +{ + ASSERT(pkey < 16); + return (pkru >> (pkey * PKRU_ATTRS + PKRU_READ)) & 1; +} + +static inline bool_t read_pkru_wd(uint32_t pkru, unsigned int pkey) +{ + ASSERT(pkey < 16); + return (pkru >> (pkey * PKRU_ATTRS + PKRU_WRITE)) & 1; +} + +static always_inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitor %eax,%ecx,%edx;" */ + asm volatile ( + ".byte 0x0f,0x01,0xc8;" + : : "a" (eax), "c" (ecx), "d"(edx) ); +} + +static always_inline void __mwait(unsigned long eax, unsigned long ecx) +{ + /* "mwait %eax,%ecx;" */ + asm volatile ( + ".byte 0x0f,0x01,0xc9;" + : : "a" (eax), "c" (ecx) ); +} + +#define IOBMP_BYTES 8192 +#define IOBMP_INVALID_OFFSET 0x8000 + +struct __packed tss64 { + uint32_t :32; + uint64_t rsp0, rsp1, rsp2; + uint64_t :64; + /* + * Interrupt Stack Table is 1-based so tss->ist[0] corresponds to an IST + * value of 1 in an Interrupt Descriptor. + */ + uint64_t ist[7]; + uint64_t :64; + uint16_t :16, bitmap; +}; +struct tss_page { + uint64_t __aligned(PAGE_SIZE) ist_ssp[8]; + struct tss64 tss; +}; +DECLARE_PER_CPU(struct tss_page, tss_page); + +#define IST_NONE 0UL +#define IST_MCE 1UL +#define IST_NMI 2UL +#define IST_DB 3UL +#define IST_DF 4UL +#define IST_MAX 4UL + +/* Set the Interrupt Stack Table used by a particular IDT entry. */ +static inline void set_ist(idt_entry_t *idt, unsigned int ist) +{ + /* IST is a 3 bit field, 32 bits into the IDT entry. */ + ASSERT(ist <= IST_MAX); + + /* Typically used on a live idt. Dissuade any clever optimisations. 
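+ * The ACCESS_ONCE below forces the compiler to emit exactly one store, so + * a CPU delivering a vector concurrently sees either the old or the new + * IST selection, never a torn intermediate.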
*/ + ACCESS_ONCE(idt->ist) = ist; +} + +static inline void enable_each_ist(idt_entry_t *idt) +{ + set_ist(&idt[TRAP_double_fault], IST_DF); + set_ist(&idt[TRAP_nmi], IST_NMI); + set_ist(&idt[TRAP_machine_check], IST_MCE); + set_ist(&idt[TRAP_debug], IST_DB); +} + +static inline void disable_each_ist(idt_entry_t *idt) +{ + set_ist(&idt[TRAP_double_fault], IST_NONE); + set_ist(&idt[TRAP_nmi], IST_NONE); + set_ist(&idt[TRAP_machine_check], IST_NONE); + set_ist(&idt[TRAP_debug], IST_NONE); +} + +#define IDT_ENTRIES 256 +extern idt_entry_t idt_table[]; +extern idt_entry_t *idt_tables[]; + +DECLARE_PER_CPU(root_pgentry_t *, root_pgt); + +extern void write_ptbase(struct vcpu *v); + +/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ +static always_inline void rep_nop(void) +{ + asm volatile ( "rep;nop" : : : "memory" ); +} + +#define cpu_relax() rep_nop() + +void show_code(const struct cpu_user_regs *regs); +void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs); +void show_registers(const struct cpu_user_regs *regs); +void show_execution_state(const struct cpu_user_regs *regs); +#define dump_execution_state() run_in_exception_handler(show_execution_state) +void show_page_walk(unsigned long addr); +void noreturn fatal_trap(const struct cpu_user_regs *regs, bool_t show_remote); + +extern void mtrr_ap_init(void); +extern void mtrr_bp_init(void); + +void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp); + +/* Dispatch table for exceptions */ +extern void (* const exception_table[TRAP_nr])(struct cpu_user_regs *regs); + +#define DECLARE_TRAP_HANDLER(_name) \ + void _name(void); \ + void do_ ## _name(struct cpu_user_regs *regs) +#define DECLARE_TRAP_HANDLER_CONST(_name) \ + void _name(void); \ + void do_ ## _name(const struct cpu_user_regs *regs) + +DECLARE_TRAP_HANDLER(divide_error); +DECLARE_TRAP_HANDLER(debug); +DECLARE_TRAP_HANDLER_CONST(nmi); +DECLARE_TRAP_HANDLER(int3); +DECLARE_TRAP_HANDLER(overflow); +DECLARE_TRAP_HANDLER(bounds); +DECLARE_TRAP_HANDLER(invalid_op); +DECLARE_TRAP_HANDLER(device_not_available); +DECLARE_TRAP_HANDLER(double_fault); +DECLARE_TRAP_HANDLER(invalid_TSS); +DECLARE_TRAP_HANDLER(segment_not_present); +DECLARE_TRAP_HANDLER(stack_segment); +DECLARE_TRAP_HANDLER(general_protection); +DECLARE_TRAP_HANDLER(page_fault); +DECLARE_TRAP_HANDLER(early_page_fault); +DECLARE_TRAP_HANDLER(coprocessor_error); +DECLARE_TRAP_HANDLER(simd_coprocessor_error); +DECLARE_TRAP_HANDLER_CONST(machine_check); +DECLARE_TRAP_HANDLER(alignment_check); +DECLARE_TRAP_HANDLER(entry_CP); + +DECLARE_TRAP_HANDLER(entry_int82); + +#undef DECLARE_TRAP_HANDLER_CONST +#undef DECLARE_TRAP_HANDLER + +void trap_nop(void); + +static inline void enable_nmis(void) +{ + unsigned long tmp; + + asm volatile ( "mov %%rsp, %[rsp] \n\t" + "lea .Ldone(%%rip), %[rip] \n\t" +#ifdef CONFIG_XEN_SHSTK + /* Check for CET-SS being active. 
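+ * RDSSP leaves its destination unmodified when CET-SS is inactive, so + * seeding the register with 1, a value the SSP can never hold given + * shadow stack alignment, distinguishes the two cases.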
*/ + "mov $1, %k[ssp] \n\t" + "rdsspq %[ssp] \n\t" + "cmp $1, %k[ssp] \n\t" + "je .Lshstk_done \n\t" + + /* Push 3 words on the shadow stack */ + ".rept 3 \n\t" + "call 1f; nop; 1: \n\t" + ".endr \n\t" + + /* Fixup to be an IRET shadow stack frame */ + "wrssq %q[cs], -1*8(%[ssp]) \n\t" + "wrssq %[rip], -2*8(%[ssp]) \n\t" + "wrssq %[ssp], -3*8(%[ssp]) \n\t" + + ".Lshstk_done:" +#endif + /* Write an IRET regular frame */ + "push %[ss] \n\t" + "push %[rsp] \n\t" + "pushf \n\t" + "push %q[cs] \n\t" + "push %[rip] \n\t" + "iretq \n\t" + ".Ldone: \n\t" + : [rip] "=&r" (tmp), + [rsp] "=&r" (tmp), + [ssp] "=&r" (tmp) + : [ss] "i" (__HYPERVISOR_DS), + [cs] "r" (__HYPERVISOR_CS) ); +} + +void sysenter_entry(void); +void sysenter_eflags_saved(void); +void int80_direct_trap(void); + +struct stubs { + union { + void(*func)(void); + unsigned long addr; + }; + unsigned long mfn; +}; + +DECLARE_PER_CPU(struct stubs, stubs); +unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn); + +void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, + uint32_t subleaf, struct cpuid_leaf *res); +int guest_rdmsr_xen(const struct vcpu *v, uint32_t idx, uint64_t *val); +int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val); + +static inline uint8_t get_cpu_family(uint32_t raw, uint8_t *model, + uint8_t *stepping) +{ + uint8_t fam = (raw >> 8) & 0xf; + + if ( fam == 0xf ) + fam += (raw >> 20) & 0xff; + + if ( model ) + { + uint8_t mod = (raw >> 4) & 0xf; + + if ( fam >= 0x6 ) + mod |= (raw >> 12) & 0xf0; + + *model = mod; + } + if ( stepping ) + *stepping = raw & 0xf; + return fam; +} + +extern int8_t opt_tsx, cpu_has_tsx_ctrl; +extern bool rtm_disabled; +void tsx_init(void); + +enum ap_boot_method { + AP_BOOT_NORMAL, + AP_BOOT_SKINIT, +}; +extern enum ap_boot_method ap_boot_method; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_X86_PROCESSOR_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/psr.h b/xen/arch/x86/include/asm/psr.h new file mode 100644 index 0000000000..c2257da7fc --- /dev/null +++ b/xen/arch/x86/include/asm/psr.h @@ -0,0 +1,99 @@ +/* + * psr.h: Platform Shared Resource related service for guest. + * + * Copyright (c) 2014, Intel Corporation + * Author: Dongxiao Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ +#ifndef __ASM_PSR_H__ +#define __ASM_PSR_H__ + +#include + +/* CAT cpuid level */ +#define PSR_CPUID_LEVEL_CAT 0x10 + +/* Resource Type Enumeration */ +#define PSR_RESOURCE_TYPE_L3 0x2 +#define PSR_RESOURCE_TYPE_L2 0x4 +#define PSR_RESOURCE_TYPE_MBA 0x8 + +/* L3 Monitoring Features */ +#define PSR_CMT_L3_OCCUPANCY 0x1 + +/* CDP Capability */ +#define PSR_CAT_CDP_CAPABILITY (1u << 2) + +/* L3 CDP Enable bit*/ +#define PSR_L3_QOS_CDP_ENABLE_BIT 0x0 + +/* Used by psr_get_info() */ +#define PSR_INFO_IDX_COS_MAX 0 +#define PSR_INFO_IDX_CAT_CBM_LEN 1 +#define PSR_INFO_IDX_CAT_FLAGS 2 +#define PSR_INFO_IDX_MBA_THRTL_MAX 1 +#define PSR_INFO_IDX_MBA_FLAGS 2 +#define PSR_INFO_ARRAY_SIZE 3 + +struct psr_cmt_l3 { + unsigned int features; + unsigned int upscaling_factor; + unsigned int rmid_max; +}; + +struct psr_cmt { + unsigned int rmid_max; + unsigned int features; + domid_t *rmid_to_dom; + struct psr_cmt_l3 l3; +}; + +enum psr_type { + PSR_TYPE_L3_CBM, + PSR_TYPE_L3_CODE, + PSR_TYPE_L3_DATA, + PSR_TYPE_L2_CBM, + PSR_TYPE_MBA_THRTL, + PSR_TYPE_UNKNOWN, +}; + +extern struct psr_cmt *psr_cmt; + +static inline bool_t psr_cmt_enabled(void) +{ + return !!psr_cmt; +} + +int psr_alloc_rmid(struct domain *d); +void psr_free_rmid(struct domain *d); +void psr_ctxt_switch_to(struct domain *d); + +int psr_get_info(unsigned int socket, enum psr_type type, + uint32_t data[], unsigned int array_len); +int psr_get_val(struct domain *d, unsigned int socket, + uint32_t *val, enum psr_type type); +int psr_set_val(struct domain *d, unsigned int socket, + uint64_t val, enum psr_type type); + +void psr_domain_init(struct domain *d); +void psr_domain_free(struct domain *d); + +#endif /* __ASM_PSR_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/pv/domain.h b/xen/arch/x86/include/asm/pv/domain.h new file mode 100644 index 0000000000..df9716ff26 --- /dev/null +++ b/xen/arch/x86/include/asm/pv/domain.h @@ -0,0 +1,120 @@ +/* + * pv/domain.h + * + * PV guest interface definitions + * + * Copyright (C) 2017 Wei Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + */ + +#ifndef __X86_PV_DOMAIN_H__ +#define __X86_PV_DOMAIN_H__ + +#include + +#ifdef CONFIG_PV32 +extern int8_t opt_pv32; +#else +# define opt_pv32 false +#endif + +/* + * PCID values for the address spaces of 64-bit pv domains: + * + * We are using 4 PCID values for a 64 bit pv domain subject to XPTI: + * - hypervisor active and guest in kernel mode PCID 0 + * - hypervisor active and guest in user mode PCID 1 + * - guest active and in kernel mode PCID 2 + * - guest active and in user mode PCID 3 + * + * Without XPTI only 2 values are used: + * - guest in kernel mode PCID 0 + * - guest in user mode PCID 1 + */ + +#define PCID_PV_PRIV 0x0000 /* Used for other domains, too. */ +#define PCID_PV_USER 0x0001 +#define PCID_PV_XPTI 0x0002 /* To be ORed to above values. 
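+ * E.g. a guest running in user mode on its XPTI page tables ends up + * with PCID_PV_XPTI | PCID_PV_USER == 3, matching the table above.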
*/ + +/* + * Return additional PCID specific cr3 bits. + * + * Note that X86_CR3_NOFLUSH will not be readable in cr3. Anyone consuming + * v->arch.cr3 should mask away X86_CR3_NOFLUSH and X86_CR3_PCIDMASK in case + * the value is used to address the root page table. + */ +static inline unsigned long get_pcid_bits(const struct vcpu *v, bool is_xpti) +{ +#ifdef CONFIG_PV + return X86_CR3_NOFLUSH | (is_xpti ? PCID_PV_XPTI : 0) | + ((v->arch.flags & TF_kernel_mode) ? PCID_PV_PRIV : PCID_PV_USER); +#else + ASSERT_UNREACHABLE(); + return 0; +#endif +} + +#ifdef CONFIG_PV + +void pv_vcpu_destroy(struct vcpu *v); +int pv_vcpu_initialise(struct vcpu *v); +void pv_domain_destroy(struct domain *d); +int pv_domain_initialise(struct domain *d); + +/* + * Bits which a PV guest can toggle in its view of cr4. Some are loaded into + * hardware, while some are fully emulated. + */ +#define PV_CR4_GUEST_MASK \ + (X86_CR4_TSD | X86_CR4_DE | X86_CR4_FSGSBASE | X86_CR4_OSXSAVE) + +/* Bits which a PV guest may observe from the real hardware settings. */ +#define PV_CR4_GUEST_VISIBLE_MASK \ + (X86_CR4_PAE | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT) + +/* Given a new cr4 value, construct the resulting guest-visible cr4 value. */ +unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4); + +/* Create a cr4 value to load into hardware, based on vcpu settings. */ +unsigned long pv_make_cr4(const struct vcpu *v); + +bool xpti_pcid_enabled(void); + +#else /* !CONFIG_PV */ + +#include + +static inline void pv_vcpu_destroy(struct vcpu *v) {} +static inline int pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; } +static inline void pv_domain_destroy(struct domain *d) {} +static inline int pv_domain_initialise(struct domain *d) { return -EOPNOTSUPP; } + +static inline unsigned long pv_make_cr4(const struct vcpu *v) { return ~0ul; } + +#endif /* CONFIG_PV */ + +void paravirt_ctxt_switch_from(struct vcpu *v); +void paravirt_ctxt_switch_to(struct vcpu *v); + +#endif /* __X86_PV_DOMAIN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/pv/grant_table.h b/xen/arch/x86/include/asm/pv/grant_table.h new file mode 100644 index 0000000000..85442b6074 --- /dev/null +++ b/xen/arch/x86/include/asm/pv/grant_table.h @@ -0,0 +1,60 @@ +/* + * asm-x86/pv/grant_table.h + * + * Grant table interfaces for PV guests + * + * Copyright (C) 2017 Wei Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . 
+ */ + +#ifndef __X86_PV_GRANT_TABLE_H__ +#define __X86_PV_GRANT_TABLE_H__ + +#ifdef CONFIG_PV + +int create_grant_pv_mapping(uint64_t addr, mfn_t frame, + unsigned int flags, unsigned int cache_flags); +int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, + uint64_t new_addr, unsigned int flags); + +#else + +#include + +static inline int create_grant_pv_mapping(uint64_t addr, mfn_t frame, + unsigned int flags, + unsigned int cache_flags) +{ + return GNTST_general_error; +} + +static inline int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, + uint64_t new_addr, unsigned int flags) +{ + return GNTST_general_error; +} + +#endif + +#endif /* __X86_PV_GRANT_TABLE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/pv/mm.h b/xen/arch/x86/include/asm/pv/mm.h new file mode 100644 index 0000000000..9983f8257c --- /dev/null +++ b/xen/arch/x86/include/asm/pv/mm.h @@ -0,0 +1,60 @@ +/* + * asm-x86/pv/mm.h + * + * Memory management interfaces for PV guests + * + * Copyright (C) 2017 Wei Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + */ + +#ifndef __X86_PV_MM_H__ +#define __X86_PV_MM_H__ + +#ifdef CONFIG_PV + +int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs); + +int pv_set_gdt(struct vcpu *v, const unsigned long frames[], + unsigned int entries); +void pv_destroy_gdt(struct vcpu *v); + +bool pv_map_ldt_shadow_page(unsigned int off); +bool pv_destroy_ldt(struct vcpu *v); + +int validate_segdesc_page(struct page_info *page); + +#else + +#include +#include + +static inline int pv_ro_page_fault(unsigned long addr, + struct cpu_user_regs *regs) +{ + ASSERT_UNREACHABLE(); + return 0; +} + +static inline int pv_set_gdt(struct vcpu *v, const unsigned long frames[], + unsigned int entries) +{ ASSERT_UNREACHABLE(); return -EINVAL; } +static inline void pv_destroy_gdt(struct vcpu *v) { ASSERT_UNREACHABLE(); } + +static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; } +static inline bool pv_destroy_ldt(struct vcpu *v) +{ ASSERT_UNREACHABLE(); return false; } + +#endif + +#endif /* __X86_PV_MM_H__ */ diff --git a/xen/arch/x86/include/asm/pv/shim.h b/xen/arch/x86/include/asm/pv/shim.h new file mode 100644 index 0000000000..8a91f4f9df --- /dev/null +++ b/xen/arch/x86/include/asm/pv/shim.h @@ -0,0 +1,119 @@ +/****************************************************************************** + * asm-x86/guest/shim.h + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + * + * Copyright (c) 2017 Citrix Systems Ltd. + */ + +#ifndef __X86_PV_SHIM_H__ +#define __X86_PV_SHIM_H__ + +#include + +#if defined(CONFIG_PV_SHIM_EXCLUSIVE) +# define pv_shim 1 +#elif defined(CONFIG_PV_SHIM) +extern bool pv_shim; +#else +# define pv_shim 0 +#endif /* CONFIG_PV_SHIM{,_EXCLUSIVE} */ + +#ifdef CONFIG_PV_SHIM + +void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, + unsigned long va_start, unsigned long store_va, + unsigned long console_va, unsigned long vphysmap, + start_info_t *si); +int pv_shim_shutdown(uint8_t reason); +void pv_shim_inject_evtchn(unsigned int port); +long pv_shim_cpu_up(void *data); +long pv_shim_cpu_down(void *data); +void pv_shim_online_memory(unsigned int nr, unsigned int order); +void pv_shim_offline_memory(unsigned int nr, unsigned int order); +domid_t get_initial_domain_id(void); +uint64_t pv_shim_mem(uint64_t avail); +void pv_shim_fixup_e820(struct e820map *e820); +const struct platform_bad_page *pv_shim_reserved_pages(unsigned int *size); + +#else + +static inline void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, + unsigned long va_start, + unsigned long store_va, + unsigned long console_va, + unsigned long vphysmap, + start_info_t *si) +{ + ASSERT_UNREACHABLE(); +} +static inline int pv_shim_shutdown(uint8_t reason) +{ + ASSERT_UNREACHABLE(); + return 0; +} +static inline void pv_shim_inject_evtchn(unsigned int port) +{ + ASSERT_UNREACHABLE(); +} +static inline long pv_shim_cpu_up(void *data) +{ + ASSERT_UNREACHABLE(); + return 0; +} +static inline long pv_shim_cpu_down(void *data) +{ + ASSERT_UNREACHABLE(); + return 0; +} +static inline void pv_shim_online_memory(unsigned int nr, unsigned int order) +{ + ASSERT_UNREACHABLE(); +} +static inline void pv_shim_offline_memory(unsigned int nr, unsigned int order) +{ + ASSERT_UNREACHABLE(); +} +static inline domid_t get_initial_domain_id(void) +{ + return 0; +} +static inline uint64_t pv_shim_mem(uint64_t avail) +{ + ASSERT_UNREACHABLE(); + return 0; +} +static inline void pv_shim_fixup_e820(struct e820map *e820) +{ + ASSERT_UNREACHABLE(); +} +static inline const struct platform_bad_page * +pv_shim_reserved_pages(unsigned int *s) +{ + ASSERT_UNREACHABLE(); + return NULL; +} + +#endif + +#endif /* __X86_PV_SHIM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/pv/trace.h b/xen/arch/x86/include/asm/pv/trace.h new file mode 100644 index 0000000000..c616206eeb --- /dev/null +++ b/xen/arch/x86/include/asm/pv/trace.h @@ -0,0 +1,48 @@ +#ifndef XEN_X86_PV_TRACE_H +#define XEN_X86_PV_TRACE_H + +#include + +#include + +void __trace_pv_trap(int trapnr, unsigned long eip, + int use_error_code, unsigned error_code); +static inline void trace_pv_trap(int trapnr, unsigned long eip, + int use_error_code, unsigned error_code) +{ + if ( unlikely(tb_init_done) ) + __trace_pv_trap(trapnr, eip, use_error_code, error_code); +} + +void __trace_pv_page_fault(unsigned long addr, unsigned error_code); +static inline void trace_pv_page_fault(unsigned long addr, + unsigned error_code) +{ + if ( unlikely(tb_init_done) ) + __trace_pv_page_fault(addr, error_code); +} + +void __trace_trap_one_addr(unsigned event, unsigned long va); +static inline void trace_trap_one_addr(unsigned event, unsigned long va) +{ + if ( unlikely(tb_init_done) ) + 
__trace_trap_one_addr(event, va); +} + +void __trace_trap_two_addr(unsigned event, unsigned long va1, + unsigned long va2); +static inline void trace_trap_two_addr(unsigned event, unsigned long va1, + unsigned long va2) +{ + if ( unlikely(tb_init_done) ) + __trace_trap_two_addr(event, va1, va2); +} + +void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte); +static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) +{ + if ( unlikely(tb_init_done) ) + __trace_ptwr_emulation(addr, npte); +} + +#endif /* XEN_X86_PV_TRACE_H */ diff --git a/xen/arch/x86/include/asm/pv/traps.h b/xen/arch/x86/include/asm/pv/traps.h new file mode 100644 index 0000000000..855203c4e2 --- /dev/null +++ b/xen/arch/x86/include/asm/pv/traps.h @@ -0,0 +1,71 @@ +/* + * pv/traps.h + * + * PV guest traps interface definitions + * + * Copyright (C) 2017 Wei Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms and conditions of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; If not, see . + */ + +#ifndef __X86_PV_TRAPS_H__ +#define __X86_PV_TRAPS_H__ + +#ifdef CONFIG_PV + +#include + +void pv_trap_init(void); + +int pv_raise_nmi(struct vcpu *v); + +int pv_emulate_privileged_op(struct cpu_user_regs *regs); +void pv_emulate_gate_op(struct cpu_user_regs *regs); +bool pv_emulate_invalid_op(struct cpu_user_regs *regs); + +static inline bool pv_trap_callback_registered(const struct vcpu *v, + uint8_t vector) +{ + return v->arch.pv.trap_ctxt[vector].address; +} + +#else /* !CONFIG_PV */ + +#include + +static inline void pv_trap_init(void) {} + +static inline int pv_raise_nmi(struct vcpu *v) { return -EOPNOTSUPP; } + +static inline int pv_emulate_privileged_op(struct cpu_user_regs *regs) { return 0; } +static inline void pv_emulate_gate_op(struct cpu_user_regs *regs) {} +static inline bool pv_emulate_invalid_op(struct cpu_user_regs *regs) { return true; } + +static inline bool pv_trap_callback_registered(const struct vcpu *v, + uint8_t vector) +{ + return false; +} +#endif /* CONFIG_PV */ + +#endif /* __X86_PV_TRAPS_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/random.h b/xen/arch/x86/include/asm/random.h new file mode 100644 index 0000000000..9e1fe0bc1d --- /dev/null +++ b/xen/arch/x86/include/asm/random.h @@ -0,0 +1,16 @@ +#ifndef __ASM_RANDOM_H__ +#define __ASM_RANDOM_H__ + +#include + +static inline unsigned int arch_get_random(void) +{ + unsigned int val = 0; + + if ( cpu_has(&current_cpu_data, X86_FEATURE_RDRAND) ) + asm volatile ( ".byte 0x0f,0xc7,0xf0" : "+a" (val) ); + + return val; +} + +#endif /* __ASM_RANDOM_H__ */ diff --git a/xen/arch/x86/include/asm/regs.h b/xen/arch/x86/include/asm/regs.h new file mode 100644 index 0000000000..3fb94deedc --- /dev/null +++ b/xen/arch/x86/include/asm/regs.h @@ -0,0 +1,33 @@ + +#ifndef __X86_REGS_H__ +#define __X86_REGS_H__ + +#include + +#define guest_mode(r) \ +({ \ + unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); \ + /* Frame pointer must point into current 
CPU stack. */ \ + ASSERT(diff < STACK_SIZE); \ + /* If not a guest frame, it must be a hypervisor frame. */ \ + if ( diff < PRIMARY_STACK_SIZE ) \ + ASSERT(!diff || ((r)->cs == __HYPERVISOR_CS)); \ + /* Return TRUE if it's a guest frame. */ \ + !diff || ((r)->cs != __HYPERVISOR_CS); \ +}) + +#define read_sreg(name) ({ \ + unsigned int __sel; \ + asm ( "mov %%" STR(name) ",%0" : "=r" (__sel) ); \ + __sel; \ +}) + +static inline void read_sregs(struct cpu_user_regs *regs) +{ + asm ( "mov %%ds, %0" : "=m" (regs->ds) ); + asm ( "mov %%es, %0" : "=m" (regs->es) ); + asm ( "mov %%fs, %0" : "=m" (regs->fs) ); + asm ( "mov %%gs, %0" : "=m" (regs->gs) ); +} + +#endif /* __X86_REGS_H__ */ diff --git a/xen/arch/x86/include/asm/setup.h b/xen/arch/x86/include/asm/setup.h new file mode 100644 index 0000000000..7dc03b6b8d --- /dev/null +++ b/xen/arch/x86/include/asm/setup.h @@ -0,0 +1,75 @@ +#ifndef __X86_SETUP_H_ +#define __X86_SETUP_H_ + +#include +#include + +extern const char __2M_text_start[], __2M_text_end[]; +extern const char __ro_after_init_start[], __ro_after_init_end[]; +extern const char __2M_rodata_start[], __2M_rodata_end[]; +extern char __2M_init_start[], __2M_init_end[]; +extern char __2M_rwdata_start[], __2M_rwdata_end[]; + +extern unsigned long xenheap_initial_phys_start; +extern uint64_t boot_tsc_stamp; + +extern void *stack_start; + +void early_cpu_init(void); +void early_time_init(void); + +void set_nr_cpu_ids(unsigned int max_cpus); + +void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); +void arch_init_memory(void); +void subarch_init_memory(void); + +void init_IRQ(void); + +#ifdef CONFIG_VIDEO +void vesa_init(void); +void vesa_mtrr_init(void); +#else +static inline void vesa_init(void) {}; +static inline void vesa_mtrr_init(void) {}; +#endif + +int construct_dom0( + struct domain *d, + const module_t *kernel, unsigned long kernel_headroom, + module_t *initrd, + char *cmdline); +void setup_io_bitmap(struct domain *d); + +unsigned long initial_images_nrpages(nodeid_t node); +void discard_initial_images(void); +void *bootstrap_map(const module_t *mod); + +int xen_in_range(unsigned long mfn); + +void microcode_grab_module( + unsigned long *, const multiboot_info_t *); + +extern uint8_t kbd_shift_flags; + +#ifdef NDEBUG +# define highmem_start 0 +#else +extern unsigned long highmem_start; +#endif + +extern int8_t opt_smt; + +#ifdef CONFIG_SHADOW_PAGING +extern bool opt_dom0_shadow; +#else +#define opt_dom0_shadow false +#endif +extern bool opt_dom0_pvh; +extern bool opt_dom0_verbose; +extern bool opt_dom0_cpuid_faulting; +extern bool opt_dom0_msr_relaxed; + +#define max_init_domid (0) + +#endif diff --git a/xen/arch/x86/include/asm/shadow.h b/xen/arch/x86/include/asm/shadow.h new file mode 100644 index 0000000000..e25f9604d8 --- /dev/null +++ b/xen/arch/x86/include/asm/shadow.h @@ -0,0 +1,273 @@ +/****************************************************************************** + * include/asm-x86/shadow.h + * + * Parts of this code are Copyright (c) 2006 by XenSource Inc. + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ + +#ifndef _XEN_SHADOW_H +#define _XEN_SHADOW_H + +#include +#include +#include +#include +#include +#include +#include + +#include + +/***************************************************************************** + * Macros to tell which shadow paging mode a domain is in*/ + +#define shadow_mode_enabled(_d) paging_mode_shadow(_d) +#define shadow_mode_refcounts(_d) (paging_mode_shadow(_d) && \ + paging_mode_refcounts(_d)) +#define shadow_mode_log_dirty(_d) (paging_mode_shadow(_d) && \ + paging_mode_log_dirty(_d)) +#define shadow_mode_translate(_d) (paging_mode_shadow(_d) && \ + paging_mode_translate(_d)) +#define shadow_mode_external(_d) (paging_mode_shadow(_d) && \ + paging_mode_external(_d)) + +/***************************************************************************** + * Entry points into the shadow code */ + +/* Set up the shadow-specific parts of a domain struct at start of day. + * Called from paging_domain_init(). */ +int shadow_domain_init(struct domain *d); + +/* Setup the shadow-specific parts of a vcpu struct. It is called by + * paging_vcpu_init() in paging.c */ +void shadow_vcpu_init(struct vcpu *v); + +#ifdef CONFIG_SHADOW_PAGING + +/* Enable an arbitrary shadow mode. Call once at domain creation. */ +int shadow_enable(struct domain *d, u32 mode); + +/* Enable VRAM dirty bit tracking. */ +int shadow_track_dirty_vram(struct domain *d, + unsigned long first_pfn, + unsigned int nr_frames, + XEN_GUEST_HANDLE(void) dirty_bitmap); + +/* Handler for shadow control ops: operations from user-space to enable + * and disable ephemeral shadow modes (test mode and log-dirty mode) and + * manipulate the log-dirty bitmap. */ +int shadow_domctl(struct domain *d, + struct xen_domctl_shadow_op *sc, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); + +/* Call when destroying a vcpu/domain */ +void shadow_vcpu_teardown(struct vcpu *v); +void shadow_teardown(struct domain *d, bool *preempted); + +/* Call once all of the references to the domain have gone away */ +void shadow_final_teardown(struct domain *d); + +void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all); + +/* Adjust shadows ready for a guest page to change its type. */ +void shadow_prepare_page_type_change(struct domain *d, struct page_info *page, + unsigned long new_type); + +/* Discard _all_ mappings from the domain's shadows. */ +void shadow_blow_tables_per_domain(struct domain *d); + +/* Set the pool of shadow pages to the required number of pages. + * Input will be rounded up to at least shadow_min_acceptable_pages(), + * plus space for the p2m table. + * Returns 0 for success, non-zero for failure. 
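+ * An illustrative call (not taken from this patch): + * shadow_set_allocation(d, 1024, &preempted) requests a 1024-page (4MiB) + * pool, which is then subject to the rounding described above.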
*/ +int shadow_set_allocation(struct domain *d, unsigned int pages, + bool *preempted); + +#else /* !CONFIG_SHADOW_PAGING */ + +#define shadow_vcpu_teardown(v) ASSERT(is_pv_vcpu(v)) +#define shadow_teardown(d, p) ASSERT(is_pv_domain(d)) +#define shadow_final_teardown(d) ASSERT(is_pv_domain(d)) +#define shadow_enable(d, mode) \ + ({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; }) +#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \ + ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; }) +#define shadow_set_allocation(d, pages, preempted) \ + ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; }) + +static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn, + int fast, int all) {} + +static inline void shadow_prepare_page_type_change(struct domain *d, + struct page_info *page, + unsigned long new_type) {} + +static inline void shadow_blow_tables_per_domain(struct domain *d) {} + +static inline int shadow_domctl(struct domain *d, + struct xen_domctl_shadow_op *sc, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) +{ + return -EINVAL; +} + +#endif /* CONFIG_SHADOW_PAGING */ + +/* + * Mitigations for L1TF / CVE-2018-3620 for PV guests. + * + * We cannot alter an architecturally-legitimate PTE which a PV guest has + * chosen to write, as traditional paged-out metadata is L1TF-vulnerable. + * What we can do is force a PV guest which writes a vulnerable PTE into + * shadow mode, so Xen controls the pagetables which are reachable by the CPU + * pagewalk. + * + * The core of the L1TF vulnerability is that the address bits of the PTE + * (accounting for PSE and factoring in the level-relevant part of the linear + * access) are sent for an L1D lookup (to retrieve the next-level PTE, or + * eventual memory address) before the Present or reserved bits (which would + * cause a terminal fault) are accounted for. If an L1D hit occurs, the + * resulting data is available for potentially dependent instructions. + * + * For Present PTEs, the PV type-count safety logic ensures that the address + * bits always point at a guest-accessible frame, which is safe WRT L1TF from + * Xen's point of view. In practice, a PV guest should be unable to set any + * reserved bits, so should be unable to create any present L1TF-vulnerable + * PTEs at all. + * + * Therefore, these safety checks apply to Not-Present PTEs only, where + * traditionally, Xen would have let the guest write any value it chose. + * + * The all-zero PTE potentially leaks mfn 0. All software on the system is + * expected to cooperate and not put any secrets there. In a Xen system, + * neither Xen nor dom0 are expected to touch mfn 0, as it typically contains + * the real mode IVT and Bios Data Area. Therefore, mfn 0 is considered safe. + * + * Any PTE whose address is higher than the maximum cacheable address is safe, + * as it won't get an L1D hit. + * + * Speculative superpages also need accounting for, as PSE is considered + * irrespective of Present. We disallow PSE being set, as it allows an + * attacker to leak 2M or 1G of data starting from mfn 0. Also, because of + * recursive/linear pagetables, we must consider PSE even at L4, as hardware + * will interpret an L4e as an L3e during a recursive walk. 
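+ * + * As a worked example with illustrative numbers: on a system with no + * cacheable memory at or above 2^36, a not-present PTE whose address bits + * are zero or at least 1UL << 36 is considered safe, while one pointing + * into the cacheable range below 2^36 is not, and forces the guest into + * shadow mode.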
+ */ + +static inline bool is_l1tf_safe_maddr(intpte_t pte) +{ + paddr_t maddr = pte & l1tf_addr_mask; + + return maddr == 0 || maddr >= l1tf_safe_maddr; +} + +#ifdef CONFIG_PV + +static inline bool pv_l1tf_check_pte(struct domain *d, unsigned int level, + intpte_t pte) +{ + ASSERT(is_pv_domain(d)); + ASSERT(!(pte & _PAGE_PRESENT)); + + if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) && + (((level > 1) && (pte & _PAGE_PSE)) || !is_l1tf_safe_maddr(pte)) ) + { +#ifdef CONFIG_SHADOW_PAGING + struct tasklet *t = &d->arch.paging.shadow.pv_l1tf_tasklet; + + printk(XENLOG_G_WARNING + "d%d L1TF-vulnerable L%ue %016"PRIx64" - Shadowing\n", + d->domain_id, level, pte); + /* + * Safety consideration for accessing tasklet.scheduled_on without the + * tasklet lock. This is a singleshot tasklet with the side effect of + * setting PG_SH_forced (checked just above). Multiple vcpus can race + * to schedule the tasklet, but if we observe it scheduled anywhere, + * that is good enough. + */ + smp_rmb(); + if ( !tasklet_is_scheduled(t) ) + tasklet_schedule(t); +#else + printk(XENLOG_G_ERR + "d%d L1TF-vulnerable L%ue %016"PRIx64" - Crashing\n", + d->domain_id, level, pte); + domain_crash(d); +#endif + return true; + } + + return false; +} + +static inline bool pv_l1tf_check_l1e(struct domain *d, l1_pgentry_t l1e) +{ + return pv_l1tf_check_pte(d, 1, l1e.l1); +} + +static inline bool pv_l1tf_check_l2e(struct domain *d, l2_pgentry_t l2e) +{ + return pv_l1tf_check_pte(d, 2, l2e.l2); +} + +static inline bool pv_l1tf_check_l3e(struct domain *d, l3_pgentry_t l3e) +{ + return pv_l1tf_check_pte(d, 3, l3e.l3); +} + +static inline bool pv_l1tf_check_l4e(struct domain *d, l4_pgentry_t l4e) +{ + return pv_l1tf_check_pte(d, 4, l4e.l4); +} + +void pv_l1tf_tasklet(void *data); + +static inline void pv_l1tf_domain_init(struct domain *d) +{ + d->arch.pv.check_l1tf = is_hardware_domain(d) ? opt_pv_l1tf_hwdom + : opt_pv_l1tf_domu; + +#ifdef CONFIG_SHADOW_PAGING + tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet, pv_l1tf_tasklet, d); +#endif +} + +static inline void pv_l1tf_domain_destroy(struct domain *d) +{ +#ifdef CONFIG_SHADOW_PAGING + tasklet_kill(&d->arch.paging.shadow.pv_l1tf_tasklet); +#endif +} + +#endif /* CONFIG_PV */ + +/* Remove all shadows of the guest mfn. */ +static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn) +{ + /* See the comment about locking in sh_remove_shadows */ + sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */); +} + +#endif /* _XEN_SHADOW_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/shared.h b/xen/arch/x86/include/asm/shared.h new file mode 100644 index 0000000000..dd3ae8c263 --- /dev/null +++ b/xen/arch/x86/include/asm/shared.h @@ -0,0 +1,79 @@ +#ifndef __XEN_X86_SHARED_H__ +#define __XEN_X86_SHARED_H__ + +#ifdef CONFIG_COMPAT + +#define nmi_reason(d) (!has_32bit_shinfo(d) ? \ + (u32 *)&(d)->shared_info->native.arch.nmi_reason : \ + (u32 *)&(d)->shared_info->compat.arch.nmi_reason) + +#define GET_SET_SHARED(type, field) \ +static inline type arch_get_##field(const struct domain *d) \ +{ \ + return !has_32bit_shinfo(d) ? 
\ + d->shared_info->native.arch.field : \ + d->shared_info->compat.arch.field; \ +} \ +static inline void arch_set_##field(struct domain *d, \ + type val) \ +{ \ + if ( !has_32bit_shinfo(d) ) \ + d->shared_info->native.arch.field = val; \ + else \ + d->shared_info->compat.arch.field = val; \ +} + +#define GET_SET_VCPU(type, field) \ +static inline type arch_get_##field(const struct vcpu *v) \ +{ \ + return !has_32bit_shinfo(v->domain) ? \ + v->vcpu_info->native.arch.field : \ + v->vcpu_info->compat.arch.field; \ +} \ +static inline void arch_set_##field(struct vcpu *v, \ + type val) \ +{ \ + if ( !has_32bit_shinfo(v->domain) ) \ + v->vcpu_info->native.arch.field = val; \ + else \ + v->vcpu_info->compat.arch.field = val; \ +} + +#else + +#define nmi_reason(d) (&(d)->shared_info->arch.nmi_reason) + +#define GET_SET_SHARED(type, field) \ +static inline type arch_get_##field(const struct domain *d) \ +{ \ + return d->shared_info->arch.field; \ +} \ +static inline void arch_set_##field(struct domain *d, \ + type val) \ +{ \ + d->shared_info->arch.field = val; \ +} + +#define GET_SET_VCPU(type, field) \ +static inline type arch_get_##field(const struct vcpu *v) \ +{ \ + return v->vcpu_info->arch.field; \ +} \ +static inline void arch_set_##field(struct vcpu *v, \ + type val) \ +{ \ + v->vcpu_info->arch.field = val; \ +} + +#endif + +GET_SET_SHARED(unsigned long, max_pfn) +GET_SET_SHARED(xen_pfn_t, pfn_to_mfn_frame_list_list) +GET_SET_SHARED(unsigned long, nmi_reason) + +GET_SET_VCPU(unsigned long, cr2) + +#undef GET_SET_VCPU +#undef GET_SET_SHARED + +#endif /* __XEN_X86_SHARED_H__ */ diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h new file mode 100644 index 0000000000..f7485f602e --- /dev/null +++ b/xen/arch/x86/include/asm/smp.h @@ -0,0 +1,90 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#endif + +#define BAD_APICID (-1U) +#define INVALID_CUID (~0U) /* AMD Compute Unit ID */ +#ifndef __ASSEMBLY__ + +/* + * Private routines/data + */ +DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask); +DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask); +DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask); +DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask); + +/* + * Do we, for platform reasons, need to actually keep CPUs online when we + * would otherwise prefer them to be off? + */ +extern bool park_offline_cpus; + +void smp_send_nmi_allbutself(void); + +void send_IPI_mask(const cpumask_t *, int vector); +void send_IPI_self(int vector); + +extern void (*mtrr_hook) (void); + +extern void zap_low_mappings(void); + +extern u32 x86_cpu_to_apicid[]; + +#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] + +#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) +extern void cpu_exit_clear(unsigned int cpu); +extern void cpu_uninit(unsigned int cpu); +int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm); + +/* + * This function is needed by all SMP systems. It must _always_ be valid + * from the initial startup. We map APIC_BASE very early in page_setup(), + * so this is correct in the x86 case. 
+ */ +#define smp_processor_id() get_processor_id() + +void __stop_this_cpu(void); + +long cpu_up_helper(void *data); +long cpu_down_helper(void *data); + +long core_parking_helper(void *data); +bool core_parking_remove(unsigned int cpu); +uint32_t get_cur_idle_nums(void); + +/* + * The value may be greater than the actual socket number in the system and + * is required not to change from the initial startup. + */ +extern unsigned int nr_sockets; + +void set_nr_sockets(void); + +/* Representing HT and core siblings in each socket. */ +extern cpumask_t **socket_cpumask; + +/* + * To be used only while no context switch can occur on the cpu, i.e. + * by certain scheduling code only. + */ +#define get_cpu_current(cpu) \ + (get_cpu_info_from_stack((unsigned long)stack_base[cpu])->current_vcpu) + +extern unsigned int disabled_cpus; +extern bool unaccounted_cpus; + +#endif /* !__ASSEMBLY__ */ + +#endif diff --git a/xen/arch/x86/include/asm/softirq.h b/xen/arch/x86/include/asm/softirq.h new file mode 100644 index 0000000000..415ee866c7 --- /dev/null +++ b/xen/arch/x86/include/asm/softirq.h @@ -0,0 +1,14 @@ +#ifndef __ASM_SOFTIRQ_H__ +#define __ASM_SOFTIRQ_H__ + +#define NMI_SOFTIRQ (NR_COMMON_SOFTIRQS + 0) +#define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1) +#define VCPU_KICK_SOFTIRQ (NR_COMMON_SOFTIRQS + 2) + +#define MACHINE_CHECK_SOFTIRQ (NR_COMMON_SOFTIRQS + 3) +#define HVM_DPCI_SOFTIRQ (NR_COMMON_SOFTIRQS + 4) +#define NR_ARCH_SOFTIRQS 5 + +bool arch_skip_send_event_check(unsigned int cpu); + +#endif /* __ASM_SOFTIRQ_H__ */ diff --git a/xen/arch/x86/include/asm/spec_ctrl.h b/xen/arch/x86/include/asm/spec_ctrl.h new file mode 100644 index 0000000000..a803d16f90 --- /dev/null +++ b/xen/arch/x86/include/asm/spec_ctrl.h @@ -0,0 +1,151 @@ +/****************************************************************************** + * include/asm-x86/spec_ctrl.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + * + * Copyright (c) 2017-2018 Citrix Systems Ltd. + */ + +#ifndef __X86_SPEC_CTRL_H__ +#define __X86_SPEC_CTRL_H__ + +/* Encoding of cpuinfo.spec_ctrl_flags */ +#define SCF_use_shadow (1 << 0) +#define SCF_ist_wrmsr (1 << 1) +#define SCF_ist_rsb (1 << 2) + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +void init_speculation_mitigations(void); + +extern bool opt_ibpb; +extern bool opt_ssbd; +extern int8_t opt_eager_fpu; +extern int8_t opt_l1d_flush; + +extern bool bsp_delay_spec_ctrl; +extern uint8_t default_xen_spec_ctrl; +extern uint8_t default_spec_ctrl_flags; + +extern int8_t opt_xpti_hwdom, opt_xpti_domu; + +extern bool cpu_has_bug_l1tf; +extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu; + +/* + * The L1D address mask, which might be wider than reported in CPUID, and the + * system physical address above which there are believed to be no cacheable + * memory regions, thus unable to leak data via the L1TF vulnerability. 
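+ * + * (These two bounds feed is_l1tf_safe_maddr() in shadow.h, which vets + * not-present PTEs written by PV guests.)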
*/ +extern paddr_t l1tf_addr_mask, l1tf_safe_maddr; + +extern uint64_t default_xen_mcu_opt_ctrl; + +static inline void init_shadow_spec_ctrl_state(void) +{ + struct cpu_info *info = get_cpu_info(); + + info->shadow_spec_ctrl = 0; + info->xen_spec_ctrl = default_xen_spec_ctrl; + info->spec_ctrl_flags = default_spec_ctrl_flags; + + /* + * For least latency, the VERW selector should be a writeable data + * descriptor resident in the cache. __HYPERVISOR_DS32 shares a cache + * line with __HYPERVISOR_CS, so is expected to be very cache-hot. + */ + info->verw_sel = __HYPERVISOR_DS32; +} + +/* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */ +static always_inline void spec_ctrl_enter_idle(struct cpu_info *info) +{ + uint32_t val = 0; + + /* + * Branch Target Injection: + * + * Latch the new shadow value, then enable shadowing, then update the MSR. + * There are no SMP issues here; only local processor ordering concerns. + */ + info->shadow_spec_ctrl = val; + barrier(); + info->spec_ctrl_flags |= SCF_use_shadow; + barrier(); + alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE, + "a" (val), "c" (MSR_SPEC_CTRL), "d" (0)); + barrier(); + + /* + * Microarchitectural Store Buffer Data Sampling: + * + * On vulnerable systems, store buffer entries are statically partitioned + * between active threads. When entering idle, our store buffer entries + * are re-partitioned to allow the other threads to use them. + * + * Flush the buffers to ensure that no sensitive data of ours can be + * leaked by a sibling after it gets our store buffer entries. + * + * Note: VERW must be encoded with a memory operand, as it is only that + * form which causes a flush. + */ + alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE, + [sel] "m" (info->verw_sel)); +} + +/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */ +static always_inline void spec_ctrl_exit_idle(struct cpu_info *info) +{ + uint32_t val = info->xen_spec_ctrl; + + /* + * Branch Target Injection: + * + * Disable shadowing before updating the MSR. There are no SMP issues + * here; only local processor ordering concerns. + */ + info->spec_ctrl_flags &= ~SCF_use_shadow; + barrier(); + alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE, + "a" (val), "c" (MSR_SPEC_CTRL), "d" (0)); + barrier(); + + /* + * Microarchitectural Store Buffer Data Sampling: + * + * On vulnerable systems, store buffer entries are statically partitioned + * between active threads. When exiting idle, the other threads' store + * buffer entries are re-partitioned to give us some. + * + * We now have store buffer entries with stale data from sibling threads. + * A flush, if necessary, will be performed on the return to guest path. + */ +} + +#endif /* __ASSEMBLY__ */ +#endif /* !__X86_SPEC_CTRL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h new file mode 100644 index 0000000000..cb34299a86 --- /dev/null +++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h @@ -0,0 +1,342 @@ +/****************************************************************************** + * include/asm-x86/spec_ctrl.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + * + * Copyright (c) 2017-2018 Citrix Systems Ltd. + */ + +#ifndef __X86_SPEC_CTRL_ASM_H__ +#define __X86_SPEC_CTRL_ASM_H__ + +#ifdef __ASSEMBLY__ +#include +#include + +/* + * Saving and restoring MSR_SPEC_CTRL state is a little tricky. + * + * We want the guest's choice of SPEC_CTRL while in guest context, and Xen's + * choice (set or clear, depending on the hardware) while running in Xen + * context. Therefore, a simplistic algorithm is: + * + * - Set/clear IBRS on entry to Xen + * - Set the guest's choice on exit to guest + * - Leave SPEC_CTRL unchanged on exit to Xen + * + * There are two complicating factors: + * 1) HVM guests can have direct access to the MSR, so it can change + * behind Xen's back. + * 2) An NMI or MCE can interrupt at any point, including early in the entry + * path, or late in the exit path after restoring the guest value. This + * will corrupt the guest value. + * + * Factor 1 is dealt with by relying on NMIs/MCEs being blocked immediately + * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates + * current before loading Xen's MSR_SPEC_CTRL setting. + * + * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow + * boolean in the per-cpu spec_ctrl_flags. The synchronous use is: + * + * 1) Store guest value in shadow_spec_ctrl + * 2) Set the use_shadow boolean + * 3) Load guest value into MSR_SPEC_CTRL + * 4) Exit to guest + * 5) Entry from guest + * 6) Clear the use_shadow boolean + * 7) Load Xen's value into MSR_SPEC_CTRL + * + * The asynchronous use for interrupts/exceptions is: + * - Set/clear IBRS on entry to Xen + * - On exit to Xen, check use_shadow + * - If set, load shadow_spec_ctrl + * + * Therefore, an interrupt/exception which hits the synchronous path between + * steps 2 and 6 will restore the shadow value rather than leaving Xen's value + * loaded and corrupting the value used in guest context. + * + * The following ASM fragments implement this algorithm. See their local + * comments for further details. + * - SPEC_CTRL_ENTRY_FROM_HVM + * - SPEC_CTRL_ENTRY_FROM_PV + * - SPEC_CTRL_ENTRY_FROM_INTR + * - SPEC_CTRL_ENTRY_FROM_INTR_IST + * - SPEC_CTRL_EXIT_TO_XEN_IST + * - SPEC_CTRL_EXIT_TO_XEN + * - SPEC_CTRL_EXIT_TO_PV + * - SPEC_CTRL_EXIT_TO_HVM + */ + +.macro DO_OVERWRITE_RSB tmp=rax +/* + * Requires nothing + * Clobbers \tmp (%rax by default), %rcx + * + * Requires 256 bytes of {,shadow}stack space, but %rsp/SSP has no net + * change. Based on Google's performance numbers, the loop is unrolled to 16 + * iterations and two calls per iteration. + * + * The call filling the RSB needs a nonzero displacement. A nop would do, but + * we use "1: pause; lfence; jmp 1b" to safely contain any ret-based + * speculation, even if the loop is speculatively executed prematurely. + * + * %rsp is preserved by using an extra GPR because a) we've got plenty spare, + * b) the two movs are shorter to encode than `add $32*8, %rsp`, and c) they + * can be optimised with mov-elimination in modern cores. + */ + mov $16, %ecx /* 16 iterations, two calls per loop */ + mov %rsp, %\tmp /* Store the current %rsp */ + +.L\@_fill_rsb_loop: + + .irp n, 1, 2 /* Unrolled twice. 
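+ * (.irp expands its body once per listed value of \n, so each loop + * iteration emits two call/capture/insert triples, giving the two + * calls per loop noted above.)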
*/ + call .L\@_insert_rsb_entry_\n /* Create an RSB entry. */ + +.L\@_capture_speculation_\n: + pause + lfence + jmp .L\@_capture_speculation_\n /* Capture rogue speculation. */ + +.L\@_insert_rsb_entry_\n: + .endr + + sub $1, %ecx + jnz .L\@_fill_rsb_loop + mov %\tmp, %rsp /* Restore old %rsp */ + +#ifdef CONFIG_XEN_SHSTK + mov $1, %ecx + rdsspd %ecx + cmp $1, %ecx + je .L\@_shstk_done + mov $64, %ecx /* 64 * 4 bytes, given incsspd */ + incsspd %ecx /* Restore old SSP */ +.L\@_shstk_done: +#endif +.endm + +.macro DO_SPEC_CTRL_ENTRY_FROM_HVM +/* + * Requires %rbx=current, %rsp=regs/cpuinfo + * Clobbers %rax, %rcx, %rdx + * + * The common case is that a guest has direct access to MSR_SPEC_CTRL, at + * which point we need to save the guest value before setting IBRS for Xen. + * Unilaterally saving the guest value is shorter and faster than checking. + */ + mov $MSR_SPEC_CTRL, %ecx + rdmsr + + /* Stash the value from hardware. */ + mov VCPU_arch_msrs(%rbx), %rdx + mov %eax, VCPUMSR_spec_ctrl_raw(%rdx) + xor %edx, %edx + + /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */ + andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) + + /* Load Xen's intended value. */ + movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax + wrmsr +.endm + +.macro DO_SPEC_CTRL_ENTRY maybexen:req +/* + * Requires %rsp=regs (also cpuinfo if !maybexen) + * Requires %r14=stack_end (if maybexen) + * Clobbers %rax, %rcx, %rdx + * + * PV guests can't update MSR_SPEC_CTRL behind Xen's back, so no need to read + * it back. Entries from guest context need to clear SPEC_CTRL shadowing, + * while entries from Xen must leave shadowing in its current state. + */ + mov $MSR_SPEC_CTRL, %ecx + xor %edx, %edx + + /* + * Clear SPEC_CTRL shadowing *before* loading Xen's value. If entering + * from a possibly-xen context, %rsp doesn't necessarily alias the cpuinfo + * block so calculate the position directly. + */ + .if \maybexen + xor %eax, %eax + /* Branchless `if ( !xen ) clear_shadowing` */ + testb $3, UREGS_cs(%rsp) + setnz %al + not %eax + and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) + movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax + .else + andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) + movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax + .endif + + wrmsr +.endm + +.macro DO_SPEC_CTRL_EXIT_TO_XEN +/* + * Requires %rbx=stack_end + * Clobbers %rax, %rcx, %rdx + * + * When returning to Xen context, look to see whether SPEC_CTRL shadowing is + * in effect, and reload the shadow value. This covers race conditions which + * exist with an NMI/MCE/etc hitting late in the return-to-guest path. + */ + xor %edx, %edx + + testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) + jz .L\@_skip + + mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax + mov $MSR_SPEC_CTRL, %ecx + wrmsr + +.L\@_skip: +.endm + +.macro DO_SPEC_CTRL_EXIT_TO_GUEST +/* + * Requires %eax=spec_ctrl, %rsp=regs/cpuinfo + * Clobbers %rcx, %rdx + * + * When returning to guest context, set up SPEC_CTRL shadowing and load the + * guest value. + */ + /* Set up shadow value *before* enabling shadowing. */ + mov %eax, CPUINFO_shadow_spec_ctrl(%rsp) + + /* Set SPEC_CTRL shadowing *before* loading the guest value. */ + orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) + + mov $MSR_SPEC_CTRL, %ecx + xor %edx, %edx + wrmsr +.endm + +/* Use after a VMEXIT from an HVM guest. 
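+ * (Paired on the return path with SPEC_CTRL_EXIT_TO_HVM below.)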
*/ +#define SPEC_CTRL_ENTRY_FROM_HVM \ + ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \ + ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_HVM, \ + X86_FEATURE_SC_MSR_HVM + +/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */ +#define SPEC_CTRL_ENTRY_FROM_PV \ + ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \ + ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), \ + X86_FEATURE_SC_MSR_PV + +/* Use in interrupt/exception context. May interrupt Xen or PV context. */ +#define SPEC_CTRL_ENTRY_FROM_INTR \ + ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \ + ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), \ + X86_FEATURE_SC_MSR_PV + +/* Use when exiting to Xen context. */ +#define SPEC_CTRL_EXIT_TO_XEN \ + ALTERNATIVE "", \ + DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR_PV + +/* Use when exiting to PV guest context. */ +#define SPEC_CTRL_EXIT_TO_PV \ + ALTERNATIVE "", \ + DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV; \ + ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \ + X86_FEATURE_SC_VERW_PV + +/* Use when exiting to HVM guest context. */ +#define SPEC_CTRL_EXIT_TO_HVM \ + ALTERNATIVE "", \ + DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM; \ + ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \ + X86_FEATURE_SC_VERW_HVM + +/* + * Use in IST interrupt/exception context. May interrupt Xen or PV context. + * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume + * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has + * been reloaded. + */ +.macro SPEC_CTRL_ENTRY_FROM_INTR_IST +/* + * Requires %rsp=regs, %r14=stack_end + * Clobbers %rax, %rcx, %rdx + * + * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY + * maybexen=1, but with conditionals rather than alternatives. + */ + movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %eax + + test $SCF_ist_rsb, %al + jz .L\@_skip_rsb + + DO_OVERWRITE_RSB tmp=rdx /* Clobbers %rcx/%rdx */ + +.L\@_skip_rsb: + + test $SCF_ist_wrmsr, %al + jz .L\@_skip_wrmsr + + xor %edx, %edx + testb $3, UREGS_cs(%rsp) + setnz %dl + not %edx + and %dl, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) + + /* Load Xen's intended value. */ + mov $MSR_SPEC_CTRL, %ecx + movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax + xor %edx, %edx + wrmsr + + /* Opencoded UNLIKELY_START() with no condition. */ +UNLIKELY_DISPATCH_LABEL(\@_serialise): + .subsection 1 + /* + * In the case that we might need to set SPEC_CTRL.IBRS for safety, we + * need to ensure that an attacker can't poison the `jz .L\@_skip_wrmsr` + * to speculate around the WRMSR. As a result, we need a dispatch + * serialising instruction in the else clause. + */ +.L\@_skip_wrmsr: + lfence + UNLIKELY_END(\@_serialise) +.endm + +/* Use when exiting to Xen in IST context. 
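Unlike the plain exit path, this tests SCF_ist_wrmsr at runtime rather than relying on alternatives, for the same S3/microcode reasons as SPEC_CTRL_ENTRY_FROM_INTR_IST.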
*/ +.macro SPEC_CTRL_EXIT_TO_XEN_IST +/* + * Requires %rbx=stack_end + * Clobbers %rax, %rcx, %rdx + */ + testb $SCF_ist_wrmsr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) + jz .L\@_skip + + DO_SPEC_CTRL_EXIT_TO_XEN + +.L\@_skip: +.endm + +#endif /* __ASSEMBLY__ */ +#endif /* !__X86_SPEC_CTRL_ASM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/spinlock.h b/xen/arch/x86/include/asm/spinlock.h new file mode 100644 index 0000000000..56f6095752 --- /dev/null +++ b/xen/arch/x86/include/asm/spinlock.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#define _raw_read_unlock(l) \ + BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \ + asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" ) + +/* + * On x86 the only reordering is of reads with older writes. In the + * lock case, the read in observe_head() can only be reordered with + * writes that precede it, and moving a write _into_ a locked section + * is OK. In the release case, the write in add_sized() can only be + * reordered with reads that follow it, and hoisting a read _into_ a + * locked region is OK. + */ +#define arch_lock_acquire_barrier() barrier() +#define arch_lock_release_barrier() barrier() + +#define arch_lock_relax() cpu_relax() +#define arch_lock_signal() +#define arch_lock_signal_wmb() \ +({ \ + smp_wmb(); \ + arch_lock_signal(); \ +}) + +#endif /* __ASM_SPINLOCK_H */ diff --git a/xen/arch/x86/include/asm/string.h b/xen/arch/x86/include/asm/string.h new file mode 100644 index 0000000000..f08d95096e --- /dev/null +++ b/xen/arch/x86/include/asm/string.h @@ -0,0 +1,12 @@ +#ifndef __X86_STRING_H__ +#define __X86_STRING_H__ + +#endif /* __X86_STRING_H__ */ +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/system.h b/xen/arch/x86/include/asm/system.h new file mode 100644 index 0000000000..65e63de69a --- /dev/null +++ b/xen/arch/x86/include/asm/system.h @@ -0,0 +1,295 @@ +#ifndef __ASM_SYSTEM_H +#define __ASM_SYSTEM_H + +#include +#include +#include + +static inline void wbinvd(void) +{ + asm volatile ( "wbinvd" ::: "memory" ); +} + +static inline void wbnoinvd(void) +{ + asm volatile ( "repe; wbinvd" : : : "memory" ); +} + +static inline void clflush(const void *p) +{ + asm volatile ( "clflush %0" :: "m" (*(const char *)p) ); +} + +static inline void clflushopt(const void *p) +{ + asm volatile ( "data16 clflush %0" :: "m" (*(const char *)p) ); +} + +static inline void clwb(const void *p) +{ +#if defined(HAVE_AS_CLWB) + asm volatile ( "clwb %0" :: "m" (*(const char *)p) ); +#elif defined(HAVE_AS_XSAVEOPT) + asm volatile ( "data16 xsaveopt %0" :: "m" (*(const char *)p) ); +#else + asm volatile ( ".byte 0x66, 0x0f, 0xae, 0x32" + :: "d" (p), "m" (*(const char *)p) ); +#endif +} + +#define xchg(ptr,v) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) + +#include + +/* + * Note: no "lock" prefix even on SMP: xchg always implies lock anyway + * Note 2: xchg has side effect, so that attribute volatile is necessary, + * but generally the primitive is invalid, *ptr is output argument. 
--ANK + */ +static always_inline unsigned long __xchg( + unsigned long x, volatile void *ptr, int size) +{ + switch ( size ) + { + case 1: + asm volatile ( "xchg %b[x], %[ptr]" + : [x] "+q" (x), [ptr] "+m" (*(volatile uint8_t *)ptr) + :: "memory" ); + break; + case 2: + asm volatile ( "xchg %w[x], %[ptr]" + : [x] "+r" (x), [ptr] "+m" (*(volatile uint16_t *)ptr) + :: "memory" ); + break; + case 4: + asm volatile ( "xchg %k[x], %[ptr]" + : [x] "+r" (x), [ptr] "+m" (*(volatile uint32_t *)ptr) + :: "memory" ); + break; + case 8: + asm volatile ( "xchg %q[x], %[ptr]" + : [x] "+r" (x), [ptr] "+m" (*(volatile uint64_t *)ptr) + :: "memory" ); + break; + } + return x; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ + +static always_inline unsigned long __cmpxchg( + volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + unsigned long prev; + switch ( size ) + { + case 1: + asm volatile ( "lock cmpxchg %b[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(volatile uint8_t *)ptr) + : [new] "q" (new), "a" (old) + : "memory" ); + return prev; + case 2: + asm volatile ( "lock cmpxchg %w[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(volatile uint16_t *)ptr) + : [new] "r" (new), "a" (old) + : "memory" ); + return prev; + case 4: + asm volatile ( "lock cmpxchg %k[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(volatile uint32_t *)ptr) + : [new] "r" (new), "a" (old) + : "memory" ); + return prev; + case 8: + asm volatile ( "lock cmpxchg %q[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(volatile uint64_t *)ptr) + : [new] "r" (new), "a" (old) + : "memory" ); + return prev; + } + return old; +} + +static always_inline unsigned long cmpxchg_local_( + void *ptr, unsigned long old, unsigned long new, unsigned int size) +{ + unsigned long prev = ~old; + + switch ( size ) + { + case 1: + asm volatile ( "cmpxchg %b[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr) + : [new] "q" (new), "a" (old) ); + break; + case 2: + asm volatile ( "cmpxchg %w[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr) + : [new] "r" (new), "a" (old) ); + break; + case 4: + asm volatile ( "cmpxchg %k[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr) + : [new] "r" (new), "a" (old) ); + break; + case 8: + asm volatile ( "cmpxchg %q[new], %[ptr]" + : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr) + : [new] "r" (new), "a" (old) ); + break; + } + + return prev; +} + +/* + * Undefined symbol to cause link failure if a wrong size is used with + * arch_fetch_and_add(). + */ +extern unsigned long __bad_fetch_and_add_size(void); + +static always_inline unsigned long __xadd( + volatile void *ptr, unsigned long v, int size) +{ + switch ( size ) + { + case 1: + asm volatile ( "lock xadd %b[v], %[ptr]" + : [v] "+q" (v), [ptr] "+m" (*(volatile uint8_t *)ptr) + :: "memory"); + return v; + case 2: + asm volatile ( "lock xadd %w[v], %[ptr]" + : [v] "+r" (v), [ptr] "+m" (*(volatile uint16_t *)ptr) + :: "memory"); + return v; + case 4: + asm volatile ( "lock xadd %k[v], %[ptr]" + : [v] "+r" (v), [ptr] "+m" (*(volatile uint32_t *)ptr) + :: "memory"); + return v; + case 8: + asm volatile ( "lock xadd %q[v], %[ptr]" + : [v] "+r" (v), [ptr] "+m" (*(volatile uint64_t *)ptr) + :: "memory"); + + return v; + default: + return __bad_fetch_and_add_size(); + } +} + +/* + * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr. Returns + * the previous value. 
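+ *
+ * As a usage sketch (the counter here is illustrative):
+ *
+ *     unsigned long seen = arch_fetch_and_add(&counter, 1);
+ *
+ * atomically increments counter and yields its pre-increment value.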
+ *
+ * This is a full memory barrier.
+ */
+#define arch_fetch_and_add(ptr, v) \
+ ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
+
+/*
+ * Mandatory barriers, for enforced ordering of reads and writes, e.g. for use
+ * with MMIO devices mapped with reduced cacheability.
+ */
+#define mb() asm volatile ( "mfence" ::: "memory" )
+#define rmb() asm volatile ( "lfence" ::: "memory" )
+#define wmb() asm volatile ( "sfence" ::: "memory" )
+
+/*
+ * SMP barriers, for ordering of reads and writes between CPUs, most commonly
+ * used with shared memory.
+ *
+ * Both Intel and AMD agree that, from a programmer's viewpoint:
+ * Loads cannot be reordered relative to other loads.
+ * Stores cannot be reordered relative to other stores.
+ * Loads may be reordered ahead of an unaliasing store.
+ *
+ * Refer to the vendor system programming manuals for further details.
+ */
+#define smp_mb() asm volatile ( "lock addl $0, -4(%%rsp)" ::: "memory" )
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
+
+#define smp_mb__before_atomic() do { } while (0)
+#define smp_mb__after_atomic() do { } while (0)
+
+/**
+ * array_index_mask_nospec() - generate a mask that is ~0UL when the
+ * bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ * 0 - (index < size)
+ */
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ asm volatile ( "cmp %[size], %[index]; sbb %[mask], %[mask];"
+ : [mask] "=r" (mask)
+ : [size] "g" (size), [index] "r" (index) );
+
+ return mask;
+}
+
+/* Override default implementation in nospec.h. */
+#define array_index_mask_nospec array_index_mask_nospec
+
+#define local_irq_disable() asm volatile ( "cli" : : : "memory" )
+#define local_irq_enable() asm volatile ( "sti" : : : "memory" )
+
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt() asm volatile ( "sti; hlt" : : : "memory" )
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() asm volatile ( "hlt" : : : "memory" )
+
+#define local_save_flags(x) \
+({ \
+ BUILD_BUG_ON(sizeof(x) != sizeof(long)); \
+ asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
+})
+#define local_irq_save(x) \
+({ \
+ local_save_flags(x); \
+ local_irq_disable(); \
+})
+#define local_irq_restore(x) \
+({ \
+ BUILD_BUG_ON(sizeof(x) != sizeof(long)); \
+ asm volatile ( "pushfq\n\t" \
+ "andq %0, (%%rsp)\n\t" \
+ "orq %1, (%%rsp)\n\t" \
+ "popfq" \
+ : : "i?r" ( ~X86_EFLAGS_IF ), \
+ "ri" ( (x) & X86_EFLAGS_IF ) ); \
+})
+
+static inline int local_irq_is_enabled(void)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ return !!(flags & X86_EFLAGS_IF);
+}
+
+#define BROKEN_ACPI_Sx 0x0001
+#define BROKEN_INIT_AFTER_S1 0x0002
+
+void trap_init(void);
+void init_idt_traps(void);
+void load_system_tables(void);
+void percpu_traps_init(void);
+void subarch_percpu_traps_init(void);
+
+#endif
diff --git a/xen/arch/x86/include/asm/tboot.h b/xen/arch/x86/include/asm/tboot.h
new file mode 100644
index 0000000000..bfeed1542f
--- /dev/null
+++ b/xen/arch/x86/include/asm/tboot.h
@@ -0,0 +1,160 @@
+/*
+ * tboot.h: shared data structure with MLE and kernel and functions
+ * used by kernel for runtime support
+ *
+ * Copyright (c) 2006-2007, Intel Corporation
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __TBOOT_H__ +#define __TBOOT_H__ + +#include + +typedef struct __packed { + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint16_t data4; + uint8_t data5[6]; +} uuid_t; + +/* used to communicate between tboot and the launched kernel (i.e. 
Xen) */ + +#define TB_KEY_SIZE 64 /* 512 bits */ + +#define MAX_TB_MAC_REGIONS 32 +typedef struct __packed { + uint64_t start; /* must be 64 byte -aligned */ + uint32_t size; /* must be 64 byte -granular */ +} tboot_mac_region_t; + +/* GAS - Generic Address Structure (ACPI 2.0+) */ +typedef struct __packed { + uint8_t space_id; + uint8_t bit_width; + uint8_t bit_offset; + uint8_t access_width; + uint64_t address; +} tboot_acpi_generic_address_t; + +typedef struct __packed { + tboot_acpi_generic_address_t pm1a_cnt_blk; + tboot_acpi_generic_address_t pm1b_cnt_blk; + tboot_acpi_generic_address_t pm1a_evt_blk; + tboot_acpi_generic_address_t pm1b_evt_blk; + uint16_t pm1a_cnt_val; + uint16_t pm1b_cnt_val; + uint64_t wakeup_vector; + uint32_t vector_width; + uint64_t kernel_s3_resume_vector; +} tboot_acpi_sleep_info_t; + +typedef struct __packed { + /* version 3+ fields: */ + uuid_t uuid; /* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */ + uint32_t version; /* Version number; currently supports 0.6 */ + uint32_t log_addr; /* physical addr of tb_log_t log */ + uint32_t shutdown_entry; /* entry point for tboot shutdown */ + uint32_t shutdown_type; /* type of shutdown (TB_SHUTDOWN_*) */ + tboot_acpi_sleep_info_t + acpi_sinfo; /* where kernel put acpi sleep info in Sx */ + uint32_t tboot_base; /* starting addr for tboot */ + uint32_t tboot_size; /* size of tboot */ + uint8_t num_mac_regions; /* number mem regions to MAC on S3 */ + /* contig regions memory to MAC on S3 */ + tboot_mac_region_t mac_regions[MAX_TB_MAC_REGIONS]; + /* version 4+ fields: */ + /* populated by tboot; will be encrypted */ + uint8_t s3_key[TB_KEY_SIZE]; + /* version 5+ fields: */ + uint8_t reserved_align[3]; /* used to 4byte-align num_in_wfs */ + uint32_t num_in_wfs; /* number of processors in wait-for-SIPI */ + /* version 6+ fields: */ + uint32_t flags; + uint64_t ap_wake_addr; /* phys addr of kernel/VMM SIPI vector */ + uint32_t ap_wake_trigger; /* kernel/VMM writes APIC ID to wake AP */ +} tboot_shared_t; + +#define TB_SHUTDOWN_REBOOT 0 +#define TB_SHUTDOWN_S5 1 +#define TB_SHUTDOWN_S4 2 +#define TB_SHUTDOWN_S3 3 +#define TB_SHUTDOWN_HALT 4 + +#define TB_FLAG_AP_WAKE_SUPPORT 0x00000001 /* kernel/VMM use INIT-SIPI-SIPI + if clear, ap_wake_* if set */ + +/* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */ +#define TBOOT_SHARED_UUID { 0x663c8dff, 0xe8b3, 0x4b82, 0xaabf, \ + { 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8 } }; + +extern tboot_shared_t *g_tboot_shared; + +#ifdef CONFIG_TBOOT +void tboot_probe(void); +void tboot_shutdown(uint32_t shutdown_type); +int tboot_in_measured_env(void); +int tboot_protect_mem_regions(void); +int tboot_parse_dmar_table(acpi_table_handler dmar_handler); +int tboot_s3_resume(void); +void tboot_s3_error(int error); +int tboot_wake_ap(int apicid, unsigned long sipi_vec); +#else +static inline void tboot_probe(void) {} +static inline void tboot_shutdown(uint32_t shutdown_type) {} +static inline int tboot_in_measured_env(void) { return 0; } +static inline int tboot_protect_mem_regions(void) { return 1; } + +static inline int tboot_parse_dmar_table(acpi_table_handler dmar_handler) +{ + return acpi_table_parse(ACPI_SIG_DMAR, dmar_handler); +} + +static inline int tboot_s3_resume(void) { return 0; } +static inline void tboot_s3_error(int error) {} +static inline int tboot_wake_ap(int apicid, unsigned long sipi_vec) +{ + return 1; +} +#endif /* CONFIG_TBOOT */ + +#endif /* __TBOOT_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + 
*/ diff --git a/xen/arch/x86/include/asm/time.h b/xen/arch/x86/include/asm/time.h new file mode 100644 index 0000000000..f347311cc4 --- /dev/null +++ b/xen/arch/x86/include/asm/time.h @@ -0,0 +1,76 @@ + +#ifndef __X86_TIME_H__ +#define __X86_TIME_H__ + +#include + +/* + * PV TSC emulation modes: + * 0 = guest rdtsc/p executed natively when monotonicity can be guaranteed + * and emulated otherwise (with frequency scaled if necessary) + * 1 = guest rdtsc/p always emulated at 1GHz (kernel and user) + * 2 = guest rdtsc always executed natively (no monotonicity/frequency + * guarantees); guest rdtscp emulated at native frequency if + * unsupported by h/w, else executed natively + * 3 = Removed, was PVRDTSCP. + */ +#define TSC_MODE_DEFAULT 0 +#define TSC_MODE_ALWAYS_EMULATE 1 +#define TSC_MODE_NEVER_EMULATE 2 + +typedef u64 cycles_t; + +extern bool disable_tsc_sync; + +static inline cycles_t get_cycles(void) +{ + return rdtsc_ordered(); +} + +unsigned long +mktime (unsigned int year, unsigned int mon, + unsigned int day, unsigned int hour, + unsigned int min, unsigned int sec); + +int time_suspend(void); +int time_resume(void); + +void init_percpu_time(void); +void time_latch_stamps(void); + +struct ioreq; +int hwdom_pit_access(struct ioreq *ioreq); + +int cpu_frequency_change(u64 freq); + +void pit_broadcast_enter(void); +void pit_broadcast_exit(void); +int pit_broadcast_is_available(void); + +uint64_t acpi_pm_tick_to_ns(uint64_t ticks); +uint64_t ns_to_acpi_pm_tick(uint64_t ns); + +uint64_t tsc_ticks2ns(uint64_t ticks); + +uint64_t pv_soft_rdtsc(const struct vcpu *v, const struct cpu_user_regs *regs); +u64 gtime_to_gtsc(struct domain *d, u64 time); +u64 gtsc_to_gtime(struct domain *d, u64 tsc); + +int tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec, + uint32_t gtsc_khz, uint32_t incarnation); + +void tsc_get_info(struct domain *d, uint32_t *tsc_mode, uint64_t *elapsed_nsec, + uint32_t *gtsc_khz, uint32_t *incarnation); + + +void force_update_vcpu_system_time(struct vcpu *v); + +bool clocksource_is_tsc(void); +int host_tsc_is_safe(void); +u64 stime2tsc(s_time_t stime); + +struct time_scale; +void set_time_scale(struct time_scale *ts, u64 ticks_per_sec); +u64 scale_delta(u64 delta, const struct time_scale *scale); + +#endif /* __X86_TIME_H__ */ diff --git a/xen/arch/x86/include/asm/trace.h b/xen/arch/x86/include/asm/trace.h new file mode 100644 index 0000000000..edef1bb099 --- /dev/null +++ b/xen/arch/x86/include/asm/trace.h @@ -0,0 +1,4 @@ +#ifndef __ASM_TRACE_H__ +#define __ASM_TRACE_H__ + +#endif /* __ASM_TRACE_H__ */ diff --git a/xen/arch/x86/include/asm/traps.h b/xen/arch/x86/include/asm/traps.h new file mode 100644 index 0000000000..ec23d3a70b --- /dev/null +++ b/xen/arch/x86/include/asm/traps.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2007, 2008 Advanced Micro Devices, Inc. + * Author: Christoph Egger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . 
 + */
+
+#ifndef ASM_TRAP_H
+#define ASM_TRAP_H
+
+const char *trapstr(unsigned int trapnr);
+
+#endif /* ASM_TRAP_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/include/asm/types.h b/xen/arch/x86/include/asm/types.h
new file mode 100644
index 0000000000..7817132048
--- /dev/null
+++ b/xen/arch/x86/include/asm/types.h
@@ -0,0 +1,50 @@
+#ifndef __X86_TYPES_H__
+#define __X86_TYPES_H__
+
+#ifndef __ASSEMBLY__
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long s64;
+typedef unsigned long u64;
+typedef unsigned long paddr_t;
+#define INVALID_PADDR (~0UL)
+#define PRIpaddr "016lx"
+
+#if defined(__SIZE_TYPE__)
+typedef __SIZE_TYPE__ size_t;
+#else
+typedef unsigned long size_t;
+#endif
+typedef signed long ssize_t;
+
+#if defined(__PTRDIFF_TYPE__)
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#else
+typedef signed long ptrdiff_t;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __X86_TYPES_H__ */
diff --git a/xen/arch/x86/include/asm/uaccess.h b/xen/arch/x86/include/asm/uaccess.h
new file mode 100644
index 0000000000..684fccd95c
--- /dev/null
+++ b/xen/arch/x86/include/asm/uaccess.h
@@ -0,0 +1,429 @@
+
+#ifndef __X86_UACCESS_H__
+#define __X86_UACCESS_H__
+
+#include
+#include
+#include
+#include
+
+#include
+
+unsigned int copy_to_guest_pv(void __user *to, const void *from,
+ unsigned int len);
+unsigned int clear_guest_pv(void __user *to, unsigned int len);
+unsigned int copy_from_guest_pv(void *to, const void __user *from,
+ unsigned int len);
+
+/* Handles exceptions in both to and from, but doesn't do access_ok */
+unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n);
+unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n);
+unsigned int copy_to_unsafe_ll(void *to, const void *from, unsigned int n);
+unsigned int copy_from_unsafe_ll(void *to, const void *from, unsigned int n);
+
+extern long __get_user_bad(void);
+extern void __put_user_bad(void);
+
+#define UA_KEEP(args...) args
+#define UA_DROP(args...)
+
+/**
+ * get_guest: - Get a simple variable from guest space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in guest space.
+ *
+ * This macro loads a single simple variable from guest space.
+ * It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_guest(x, ptr) get_guest_check(x, ptr, sizeof(*(ptr)))
+
+/**
+ * put_guest: - Write a simple value into guest space.
+ * @x: Value to store in guest space.
+ * @ptr: Destination address, in guest space.
+ *
+ * This macro stores a single simple value into guest space.
+ * It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
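+ *
+ * A minimal usage sketch (val and uaddr are illustrative):
+ *
+ *     if ( put_guest(val, uaddr) )
+ *         return -EFAULT;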
+ * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_guest(x, ptr) \ + put_guest_check((__typeof__(*(ptr)))(x), ptr, sizeof(*(ptr))) + +/** + * __get_guest: - Get a simple variable from guest space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in guest space. + * + * This macro copies a single simple variable from guest space to hypervisor + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_guest(x, ptr) get_guest_nocheck(x, ptr, sizeof(*(ptr))) + +/** + * __put_guest: - Write a simple value into guest space, with less checking. + * @x: Value to store in guest space. + * @ptr: Destination address, in guest space. + * + * This macro copies a single simple value from hypervisor space to guest + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_guest(x, ptr) \ + put_guest_nocheck((__typeof__(*(ptr)))(x), ptr, sizeof(*(ptr))) + +#define put_unsafe(x, ptr) \ +({ \ + int err_; \ + put_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\ + err_; \ +}) + +#define put_guest_nocheck(x, ptr, size) \ +({ \ + int err_; \ + put_guest_size(x, ptr, size, err_, -EFAULT); \ + err_; \ +}) + +#define put_guest_check(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __user *ptr_ = (ptr); \ + __typeof__(size) size_ = (size); \ + access_ok(ptr_, size_) ? put_guest_nocheck(x, ptr_, size_) \ + : -EFAULT; \ +}) + +#define get_unsafe(x, ptr) \ +({ \ + int err_; \ + get_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\ + err_; \ +}) + +#define get_guest_nocheck(x, ptr, size) \ +({ \ + int err_; \ + get_guest_size(x, ptr, size, err_, -EFAULT); \ + err_; \ +}) + +#define get_guest_check(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __user *ptr_ = (ptr); \ + __typeof__(size) size_ = (size); \ + access_ok(ptr_, size_) ? get_guest_nocheck(x, ptr_, size_) \ + : -EFAULT; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(const struct __large_struct *)(x)) + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. 
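+ *
+ * Concretely, this is why "m" (__m(addr)) below is passed as an input
+ * operand even though the asm body stores through it.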
+ */ +#define put_unsafe_asm(x, addr, GUARD, err, itype, rtype, ltype, errret) \ + __asm__ __volatile__( \ + GUARD( \ + " guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \ + ) \ + "1: mov"itype" %"rtype"[val], (%[ptr])\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %[errno], %[ret]\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : [ret] "+r" (err), [ptr] "=&r" (dummy_) \ + GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)) \ + : [val] ltype (x), "m" (__m(addr)), \ + "[ptr]" (addr), [errno] "i" (errret)) + +#define get_unsafe_asm(x, addr, GUARD, err, rtype, ltype, errret) \ + __asm__ __volatile__( \ + GUARD( \ + " guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \ + ) \ + "1: mov (%[ptr]), %"rtype"[val]\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %[errno], %[ret]\n" \ + " xor %k[val], %k[val]\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : [ret] "+r" (err), [val] ltype (x), \ + [ptr] "=&r" (dummy_) \ + GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)) \ + : "m" (__m(addr)), "[ptr]" (addr), \ + [errno] "i" (errret)) + +#define put_unsafe_size(x, ptr, size, grd, retval, errret) \ +do { \ + retval = 0; \ + stac(); \ + switch ( size ) \ + { \ + long dummy_; \ + case 1: \ + put_unsafe_asm(x, ptr, grd, retval, "b", "b", "iq", errret); \ + break; \ + case 2: \ + put_unsafe_asm(x, ptr, grd, retval, "w", "w", "ir", errret); \ + break; \ + case 4: \ + put_unsafe_asm(x, ptr, grd, retval, "l", "k", "ir", errret); \ + break; \ + case 8: \ + put_unsafe_asm(x, ptr, grd, retval, "q", "", "ir", errret); \ + break; \ + default: __put_user_bad(); \ + } \ + clac(); \ +} while ( false ) + +#define put_guest_size(x, ptr, size, retval, errret) \ + put_unsafe_size(x, ptr, size, UA_KEEP, retval, errret) + +#define get_unsafe_size(x, ptr, size, grd, retval, errret) \ +do { \ + retval = 0; \ + stac(); \ + switch ( size ) \ + { \ + long dummy_; \ + case 1: get_unsafe_asm(x, ptr, grd, retval, "b", "=q", errret); break; \ + case 2: get_unsafe_asm(x, ptr, grd, retval, "w", "=r", errret); break; \ + case 4: get_unsafe_asm(x, ptr, grd, retval, "k", "=r", errret); break; \ + case 8: get_unsafe_asm(x, ptr, grd, retval, "", "=r", errret); break; \ + default: __get_user_bad(); \ + } \ + clac(); \ +} while ( false ) + +#define get_guest_size(x, ptr, size, retval, errret) \ + get_unsafe_size(x, ptr, size, UA_KEEP, retval, errret) + +/** + * __copy_to_guest_pv: - Copy a block of data into guest space, with less + * checking + * @to: Destination address, in guest space. + * @from: Source address, in hypervisor space. + * @n: Number of bytes to copy. + * + * Copy data from hypervisor space to guest space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. 
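+ *
+ * Constant sizes of 1, 2, 4 or 8 bytes below compile down to a single
+ * put_guest_size() invocation; all other sizes fall through to
+ * copy_to_guest_ll().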
+ */ +static always_inline unsigned long +__copy_to_guest_pv(void __user *to, const void *from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + unsigned long ret; + + switch (n) { + case 1: + put_guest_size(*(const uint8_t *)from, to, 1, ret, 1); + return ret; + case 2: + put_guest_size(*(const uint16_t *)from, to, 2, ret, 2); + return ret; + case 4: + put_guest_size(*(const uint32_t *)from, to, 4, ret, 4); + return ret; + case 8: + put_guest_size(*(const uint64_t *)from, to, 8, ret, 8); + return ret; + } + } + return copy_to_guest_ll(to, from, n); +} + +/** + * __copy_from_guest_pv: - Copy a block of data from guest space, with less + * checking + * @to: Destination address, in hypervisor space. + * @from: Source address, in guest space. + * @n: Number of bytes to copy. + * + * Copy data from guest space to hypervisor space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. + */ +static always_inline unsigned long +__copy_from_guest_pv(void *to, const void __user *from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + unsigned long ret; + + switch (n) { + case 1: + get_guest_size(*(uint8_t *)to, from, 1, ret, 1); + return ret; + case 2: + get_guest_size(*(uint16_t *)to, from, 2, ret, 2); + return ret; + case 4: + get_guest_size(*(uint32_t *)to, from, 4, ret, 4); + return ret; + case 8: + get_guest_size(*(uint64_t *)to, from, 8, ret, 8); + return ret; + } + } + return copy_from_guest_ll(to, from, n); +} + +/** + * copy_to_unsafe: - Copy a block of data to unsafe space, with exception + * checking + * @to: Unsafe destination address. + * @from: Safe source address, in hypervisor space. + * @n: Number of bytes to copy. + * + * Copy data from hypervisor space to a potentially unmapped area. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +static always_inline unsigned int +copy_to_unsafe(void __user *to, const void *from, unsigned int n) +{ + if (__builtin_constant_p(n)) { + unsigned long ret; + + switch (n) { + case 1: + put_unsafe_size(*(const uint8_t *)from, to, 1, UA_DROP, ret, 1); + return ret; + case 2: + put_unsafe_size(*(const uint16_t *)from, to, 2, UA_DROP, ret, 2); + return ret; + case 4: + put_unsafe_size(*(const uint32_t *)from, to, 4, UA_DROP, ret, 4); + return ret; + case 8: + put_unsafe_size(*(const uint64_t *)from, to, 8, UA_DROP, ret, 8); + return ret; + } + } + + return copy_to_unsafe_ll(to, from, n); +} + +/** + * copy_from_unsafe: - Copy a block of data from unsafe space, with exception + * checking + * @to: Safe destination address, in hypervisor space. + * @from: Unsafe source address. + * @n: Number of bytes to copy. + * + * Copy data from a potentially unmapped area space to hypervisor space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. 
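+ *
+ * (For the constant-size cases below, the padding falls out of the
+ * fixup path in get_unsafe_asm(), which zeroes the destination
+ * register before resuming.)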
+ */ +static always_inline unsigned int +copy_from_unsafe(void *to, const void __user *from, unsigned int n) +{ + if ( __builtin_constant_p(n) ) + { + unsigned long ret; + + switch ( n ) + { + case 1: + get_unsafe_size(*(uint8_t *)to, from, 1, UA_DROP, ret, 1); + return ret; + case 2: + get_unsafe_size(*(uint16_t *)to, from, 2, UA_DROP, ret, 2); + return ret; + case 4: + get_unsafe_size(*(uint32_t *)to, from, 4, UA_DROP, ret, 4); + return ret; + case 8: + get_unsafe_size(*(uint64_t *)to, from, 8, UA_DROP, ret, 8); + return ret; + } + } + + return copy_from_unsafe_ll(to, from, n); +} + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + s32 addr, cont; +}; +extern struct exception_table_entry __start___ex_table[]; +extern struct exception_table_entry __stop___ex_table[]; +extern struct exception_table_entry __start___pre_ex_table[]; +extern struct exception_table_entry __stop___pre_ex_table[]; + +union stub_exception_token { + struct { + uint16_t ec; + uint8_t trapnr; + } fields; + unsigned long raw; +}; + +extern unsigned long search_exception_table(const struct cpu_user_regs *regs); +extern void sort_exception_tables(void); +extern void sort_exception_table(struct exception_table_entry *start, + const struct exception_table_entry *stop); + +#endif /* __X86_UACCESS_H__ */ diff --git a/xen/arch/x86/include/asm/unaligned.h b/xen/arch/x86/include/asm/unaligned.h new file mode 100644 index 0000000000..6070801d4a --- /dev/null +++ b/xen/arch/x86/include/asm/unaligned.h @@ -0,0 +1,6 @@ +#ifndef __ASM_UNALIGNED_H__ +#define __ASM_UNALIGNED_H__ + +#include + +#endif /* __ASM_UNALIGNED_H__ */ diff --git a/xen/arch/x86/include/asm/vm_event.h b/xen/arch/x86/include/asm/vm_event.h new file mode 100644 index 0000000000..0756124075 --- /dev/null +++ b/xen/arch/x86/include/asm/vm_event.h @@ -0,0 +1,59 @@ +/* + * vm_event.h: architecture specific vm_event handling routines + * + * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + */ + +#ifndef __ASM_X86_VM_EVENT_H__ +#define __ASM_X86_VM_EVENT_H__ + +#include +#include + +/* + * Should we emulate the next matching instruction on VCPU resume + * after a vm_event? 
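+ * emulate_flags below records such a request from the vm_event
+ * response; the emul union carries any data the monitoring agent
+ * supplied for that emulation.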
+ */ +struct arch_vm_event { + uint32_t emulate_flags; + union { + struct vm_event_emul_read_data read; + struct vm_event_emul_insn_data insn; + } emul; + struct monitor_write_data write_data; + struct vm_event_regs_x86 gprs; + bool set_gprs; + /* A sync vm_event has been sent and we're not done handling it. */ + bool sync_event; + /* Send mem access events from emulator */ + bool send_event; +}; + +int vm_event_init_domain(struct domain *d); + +void vm_event_cleanup_domain(struct domain *d); + +void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v, + vm_event_response_t *rsp); + +void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp); + +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp); + +void vm_event_sync_event(struct vcpu *v, bool value); + +void vm_event_reset_vmtrace(struct vcpu *v); + +#endif /* __ASM_X86_VM_EVENT_H__ */ diff --git a/xen/arch/x86/include/asm/vpmu.h b/xen/arch/x86/include/asm/vpmu.h new file mode 100644 index 0000000000..e5709bd44a --- /dev/null +++ b/xen/arch/x86/include/asm/vpmu.h @@ -0,0 +1,140 @@ +/* + * vpmu.h: PMU virtualization for HVM domain. + * + * Copyright (c) 2007, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; If not, see . + * + * Author: Haitao Shan + */ + +#ifndef __ASM_X86_HVM_VPMU_H_ +#define __ASM_X86_HVM_VPMU_H_ + +#include + +#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu) +#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu) +#define vpmu_available(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE) + +#define MSR_TYPE_COUNTER 0 +#define MSR_TYPE_CTRL 1 +#define MSR_TYPE_GLOBAL 2 +#define MSR_TYPE_ARCH_COUNTER 3 +#define MSR_TYPE_ARCH_CTRL 4 + +/* Start of PMU register bank */ +#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \ + (uintptr_t)ctxt->offset)) + +/* Arch specific operations shared by all vpmus */ +struct arch_vpmu_ops { + int (*initialise)(struct vcpu *v); + int (*do_wrmsr)(unsigned int msr, uint64_t msr_content); + int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content); + int (*do_interrupt)(struct cpu_user_regs *regs); + void (*arch_vpmu_destroy)(struct vcpu *v); + int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest); + int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest); + void (*arch_vpmu_dump)(const struct vcpu *); +}; + +const struct arch_vpmu_ops *core2_vpmu_init(void); +const struct arch_vpmu_ops *amd_vpmu_init(void); +const struct arch_vpmu_ops *hygon_vpmu_init(void); + +struct vpmu_struct { + u32 flags; + u32 last_pcpu; + u32 hw_lapic_lvtpc; + void *context; /* May be shared with PV guest */ + void *priv_context; /* hypervisor-only */ + struct xen_pmu_data *xenpmu_data; + spinlock_t vpmu_lock; +}; + +/* VPMU states */ +#define VPMU_INITIALIZED 0x0001 +#define VPMU_CONTEXT_ALLOCATED 0x0002 +#define VPMU_CONTEXT_LOADED 0x0004 +#define VPMU_RUNNING 0x0008 +#define VPMU_CONTEXT_SAVE 0x0010 /* Force context save */ +#define VPMU_FROZEN 0x0020 /* Stop counters while VCPU is not running */ +#define 
VPMU_PASSIVE_DOMAIN_ALLOCATED 0x0040 +/* PV(H) guests: VPMU registers are accessed by guest from shared page */ +#define VPMU_CACHED 0x0080 +#define VPMU_AVAILABLE 0x0100 + +/* Intel-specific VPMU features */ +#define VPMU_CPU_HAS_DS 0x1000 /* Has Debug Store */ +#define VPMU_CPU_HAS_BTS 0x2000 /* Has Branch Trace Store */ + +static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask) +{ + vpmu->flags |= mask; +} +static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask) +{ + vpmu->flags &= ~mask; +} +static inline void vpmu_clear(struct vpmu_struct *vpmu) +{ + /* VPMU_AVAILABLE should be altered by get/put_vpmu(). */ + vpmu->flags &= VPMU_AVAILABLE; +} +static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask) +{ + return !!(vpmu->flags & mask); +} +static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu, + const u32 mask) +{ + return !!((vpmu->flags & mask) == mask); +} + +void vpmu_lvtpc_update(uint32_t val); +int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, bool is_write); +void vpmu_do_interrupt(struct cpu_user_regs *regs); +void vpmu_initialise(struct vcpu *v); +void vpmu_destroy(struct vcpu *v); +void vpmu_save(struct vcpu *v); +int vpmu_load(struct vcpu *v, bool_t from_guest); +void vpmu_dump(struct vcpu *v); + +static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) +{ + return vpmu_do_msr(msr, &msr_content, true /* write */); +} +static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) +{ + return vpmu_do_msr(msr, msr_content, false /* read */); +} + +extern unsigned int vpmu_mode; +extern unsigned int vpmu_features; + +/* Context switch */ +static inline void vpmu_switch_from(struct vcpu *prev) +{ + if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) + vpmu_save(prev); +} + +static inline void vpmu_switch_to(struct vcpu *next) +{ + if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) + vpmu_load(next, 0); +} + +#endif /* __ASM_X86_HVM_VPMU_H_*/ + diff --git a/xen/arch/x86/include/asm/x86-defns.h b/xen/arch/x86/include/asm/x86-defns.h new file mode 100644 index 0000000000..28628807cb --- /dev/null +++ b/xen/arch/x86/include/asm/x86-defns.h @@ -0,0 +1,156 @@ +#ifndef __XEN_X86_DEFNS_H__ +#define __XEN_X86_DEFNS_H__ + +/* + * EFLAGS bits + */ +#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ +#define X86_EFLAGS_MBS 0x00000002 /* Resvd bit */ +#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ +#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ +#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ +#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ +#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ +#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ +#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ +#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ +#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ +#define X86_EFLAGS_NT 0x00004000 /* Nested Task */ +#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ +#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ +#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ +#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ +#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ +#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ + +#define X86_EFLAGS_ARITH_MASK \ + (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \ + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF) + +/* + * Intel CPU flags in CR0 + */ +#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */ +#define X86_CR0_MP 0x00000002 /* 
Monitor Coprocessor (RW) */ +#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */ +#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */ +#define X86_CR0_ET 0x00000010 /* Extension type (RO) */ +#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */ +#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */ +#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */ +#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */ +#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */ +#define X86_CR0_PG 0x80000000 /* Paging (RW) */ + +/* + * Intel CPU flags in CR3 + */ +#define X86_CR3_NOFLUSH (_AC(1, ULL) << 63) +#define X86_CR3_ADDR_MASK (PAGE_MASK & PADDR_MASK) +#define X86_CR3_PCID_MASK _AC(0x0fff, ULL) /* Mask for PCID */ + +/* + * Intel CPU features in CR4 + */ +#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */ +#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */ +#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */ +#define X86_CR4_DE 0x00000008 /* enable debugging extensions */ +#define X86_CR4_PSE 0x00000010 /* enable page size extensions */ +#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */ +#define X86_CR4_MCE 0x00000040 /* Machine check enable */ +#define X86_CR4_PGE 0x00000080 /* enable global pages */ +#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */ +#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ +#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ +#define X86_CR4_UMIP 0x00000800 /* enable UMIP */ +#define X86_CR4_LA57 0x00001000 /* enable 5-level paging */ +#define X86_CR4_VMXE 0x00002000 /* enable VMX */ +#define X86_CR4_SMXE 0x00004000 /* enable SMX */ +#define X86_CR4_FSGSBASE 0x00010000 /* enable {rd,wr}{fs,gs}base */ +#define X86_CR4_PCIDE 0x00020000 /* enable PCID */ +#define X86_CR4_OSXSAVE 0x00040000 /* enable XSAVE/XRSTOR */ +#define X86_CR4_SMEP 0x00100000 /* enable SMEP */ +#define X86_CR4_SMAP 0x00200000 /* enable SMAP */ +#define X86_CR4_PKE 0x00400000 /* enable PKE */ +#define X86_CR4_CET 0x00800000 /* Control-flow Enforcement Technology */ + +/* + * XSTATE component flags in XCR0 + */ +#define X86_XCR0_FP_POS 0 +#define X86_XCR0_FP (1ULL << X86_XCR0_FP_POS) +#define X86_XCR0_SSE_POS 1 +#define X86_XCR0_SSE (1ULL << X86_XCR0_SSE_POS) +#define X86_XCR0_YMM_POS 2 +#define X86_XCR0_YMM (1ULL << X86_XCR0_YMM_POS) +#define X86_XCR0_BNDREGS_POS 3 +#define X86_XCR0_BNDREGS (1ULL << X86_XCR0_BNDREGS_POS) +#define X86_XCR0_BNDCSR_POS 4 +#define X86_XCR0_BNDCSR (1ULL << X86_XCR0_BNDCSR_POS) +#define X86_XCR0_OPMASK_POS 5 +#define X86_XCR0_OPMASK (1ULL << X86_XCR0_OPMASK_POS) +#define X86_XCR0_ZMM_POS 6 +#define X86_XCR0_ZMM (1ULL << X86_XCR0_ZMM_POS) +#define X86_XCR0_HI_ZMM_POS 7 +#define X86_XCR0_HI_ZMM (1ULL << X86_XCR0_HI_ZMM_POS) +#define X86_XCR0_PKRU_POS 9 +#define X86_XCR0_PKRU (1ULL << X86_XCR0_PKRU_POS) +#define X86_XCR0_LWP_POS 62 +#define X86_XCR0_LWP (1ULL << X86_XCR0_LWP_POS) + +/* + * Debug status flags in DR6. + */ +#define X86_DR6_DEFAULT 0xffff0ff0 /* Default %dr6 value. */ + +/* + * Debug control flags in DR7. + */ +#define X86_DR7_DEFAULT 0x00000400 /* Default %dr7 value. */ + +/* + * Invalidation types for the INVPCID instruction. 
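+ * These encode the descriptor types of the instruction per the Intel
+ * SDM; e.g. X86_INVPCID_SINGLE_CTXT flushes all non-global
+ * translations tagged with the given PCID.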
 + */
+#define X86_INVPCID_INDIV_ADDR 0
+#define X86_INVPCID_SINGLE_CTXT 1
+#define X86_INVPCID_ALL_INCL_GLOBAL 2
+#define X86_INVPCID_ALL_NON_GLOBAL 3
+
+#define X86_NR_VECTORS 256
+
+/* Exception Vectors */
+#define X86_EXC_DE 0 /* Divide Error */
+#define X86_EXC_DB 1 /* Debug Exception */
+#define X86_EXC_NMI 2 /* NMI */
+#define X86_EXC_BP 3 /* Breakpoint */
+#define X86_EXC_OF 4 /* Overflow */
+#define X86_EXC_BR 5 /* BOUND Range */
+#define X86_EXC_UD 6 /* Invalid Opcode */
+#define X86_EXC_NM 7 /* Device Not Available */
+#define X86_EXC_DF 8 /* Double Fault */
+#define X86_EXC_CSO 9 /* Coprocessor Segment Overrun */
+#define X86_EXC_TS 10 /* Invalid TSS */
+#define X86_EXC_NP 11 /* Segment Not Present */
+#define X86_EXC_SS 12 /* Stack-Segment Fault */
+#define X86_EXC_GP 13 /* General Protection Fault */
+#define X86_EXC_PF 14 /* Page Fault */
+#define X86_EXC_SPV 15 /* PIC Spurious Interrupt Vector */
+#define X86_EXC_MF 16 /* Maths fault (x87 FPU) */
+#define X86_EXC_AC 17 /* Alignment Check */
+#define X86_EXC_MC 18 /* Machine Check */
+#define X86_EXC_XM 19 /* SIMD Exception */
+#define X86_EXC_VE 20 /* Virtualisation Exception */
+#define X86_EXC_CP 21 /* Control-flow Protection */
+#define X86_EXC_HV 28 /* Hypervisor Injection */
+#define X86_EXC_VC 29 /* VMM Communication */
+#define X86_EXC_SX 30 /* Security Exception */
+
+/* Bitmap of exceptions which have error codes. */
+#define X86_EXC_HAVE_EC \
+ ((1u << X86_EXC_DF) | (1u << X86_EXC_TS) | (1u << X86_EXC_NP) | \
+ (1u << X86_EXC_SS) | (1u << X86_EXC_GP) | (1u << X86_EXC_PF) | \
+ (1u << X86_EXC_AC) | (1u << X86_EXC_CP) | \
+ (1u << X86_EXC_VC) | (1u << X86_EXC_SX))
+
+#endif /* __XEN_X86_DEFNS_H__ */
diff --git a/xen/arch/x86/include/asm/x86-vendors.h b/xen/arch/x86/include/asm/x86-vendors.h
new file mode 100644
index 0000000000..0a37024cbd
--- /dev/null
+++ b/xen/arch/x86/include/asm/x86-vendors.h
@@ -0,0 +1,39 @@
+#ifndef __XEN_X86_VENDORS_H__
+#define __XEN_X86_VENDORS_H__
+
+/*
+ * CPU vendor IDs
+ *
+ * - X86_VENDOR_* are Xen-internal identifiers. The order is arbitrary, but
+ * values form a bitmap so vendor checks can be made against multiple
+ * vendors at once.
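+ * E.g. a single test of the form
+ * (x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) covers both AMD
+ * and Hygon parts at once.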
+ * - X86_VENDOR_*_E?X are architectural information from CPUID leaf 0 + */ +#define X86_VENDOR_UNKNOWN 0 + +#define X86_VENDOR_INTEL (1 << 0) +#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */ +#define X86_VENDOR_INTEL_ECX 0x6c65746eU +#define X86_VENDOR_INTEL_EDX 0x49656e69U + +#define X86_VENDOR_AMD (1 << 1) +#define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */ +#define X86_VENDOR_AMD_ECX 0x444d4163U +#define X86_VENDOR_AMD_EDX 0x69746e65U + +#define X86_VENDOR_CENTAUR (1 << 2) +#define X86_VENDOR_CENTAUR_EBX 0x746e6543U /* "CentaurHauls" */ +#define X86_VENDOR_CENTAUR_ECX 0x736c7561U +#define X86_VENDOR_CENTAUR_EDX 0x48727561U + +#define X86_VENDOR_SHANGHAI (1 << 3) +#define X86_VENDOR_SHANGHAI_EBX 0x68532020U /* " Shanghai " */ +#define X86_VENDOR_SHANGHAI_ECX 0x20206961U +#define X86_VENDOR_SHANGHAI_EDX 0x68676e61U + +#define X86_VENDOR_HYGON (1 << 4) +#define X86_VENDOR_HYGON_EBX 0x6f677948U /* "HygonGenuine" */ +#define X86_VENDOR_HYGON_ECX 0x656e6975U +#define X86_VENDOR_HYGON_EDX 0x6e65476eU + +#endif /* __XEN_X86_VENDORS_H__ */ diff --git a/xen/arch/x86/include/asm/x86_64/efibind.h b/xen/arch/x86/include/asm/x86_64/efibind.h new file mode 100644 index 0000000000..ddcfae07ec --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/efibind.h @@ -0,0 +1,280 @@ +/*++ + +Copyright (c) 1998 Intel Corporation + +Module Name: + + efefind.h + +Abstract: + + EFI to compile bindings + + + + +Revision History + +--*/ + +#ifndef __GNUC__ +#pragma pack() +#endif + +// +// Basic int types of various widths +// + +#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L ) + + // No ANSI C 1999/2000 stdint.h integer width declarations + + #if defined(_MSC_EXTENSIONS) + + // Use Microsoft C compiler integer width declarations + + typedef unsigned __int64 uint64_t; + typedef __int64 int64_t; + typedef unsigned __int32 uint32_t; + typedef __int32 int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #elif defined(__GNUC__) + typedef unsigned long long uint64_t __attribute__((aligned (8))); + typedef long long int64_t __attribute__((aligned (8))); + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #elif defined(UNIX_LP64) + + /* Use LP64 programming model from C_FLAGS for integer width declarations */ + + typedef unsigned long uint64_t; + typedef long int64_t; + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #else + + /* Assume P64 programming model from C_FLAGS for integer width declarations */ + + typedef unsigned long long uint64_t __attribute__((aligned (8))); + typedef long long int64_t __attribute__((aligned (8))); + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef char int8_t; + #endif +#endif + +// +// Basic EFI types of various widths +// + +#ifndef __WCHAR_TYPE__ +# define __WCHAR_TYPE__ short +#endif + +typedef uint64_t UINT64; +typedef int64_t INT64; + +#ifndef _BASETSD_H_ + typedef uint32_t UINT32; + typedef int32_t INT32; +#endif + +typedef uint16_t UINT16; +typedef int16_t INT16; +typedef uint8_t UINT8; +typedef int8_t INT8; +typedef __WCHAR_TYPE__ WCHAR; + +#undef VOID +#define VOID void + + +typedef int64_t INTN; +typedef uint64_t UINTN; + 
+#ifdef EFI_NT_EMULATOR + #define POST_CODE(_Data) +#else + #ifdef EFI_DEBUG +#define POST_CODE(_Data) __asm mov eax,(_Data) __asm out 0x80,al + #else + #define POST_CODE(_Data) + #endif +#endif + +#define EFIERR(a) (0x8000000000000000 | a) +#define EFI_ERROR_MASK 0x8000000000000000 +#define EFIERR_OEM(a) (0xc000000000000000 | a) + + +#define BAD_POINTER 0xFBFBFBFBFBFBFBFB +#define MAX_ADDRESS 0xFFFFFFFFFFFFFFFF + +#ifdef EFI_NT_EMULATOR + #define BREAKPOINT() __asm { int 3 } +#else + #define BREAKPOINT() while (TRUE); // Make it hang on Bios[Dbg]32 +#endif + +// +// Pointers must be aligned to these address to function +// + +#define MIN_ALIGNMENT_SIZE 4 + +#define ALIGN_VARIABLE(Value ,Adjustment) \ + (UINTN)Adjustment = 0; \ + if((UINTN)Value % MIN_ALIGNMENT_SIZE) \ + (UINTN)Adjustment = MIN_ALIGNMENT_SIZE - ((UINTN)Value % MIN_ALIGNMENT_SIZE); \ + Value = (UINTN)Value + (UINTN)Adjustment + + +// +// Define macros to build data structure signatures from characters. +// + +#define EFI_SIGNATURE_16(A,B) ((A) | (B<<8)) +#define EFI_SIGNATURE_32(A,B,C,D) (EFI_SIGNATURE_16(A,B) | (EFI_SIGNATURE_16(C,D) << 16)) +#define EFI_SIGNATURE_64(A,B,C,D,E,F,G,H) (EFI_SIGNATURE_32(A,B,C,D) | ((UINT64)(EFI_SIGNATURE_32(E,F,G,H)) << 32)) +// +// To export & import functions in the EFI emulator environment +// + +#ifdef EFI_NT_EMULATOR + #define EXPORTAPI __declspec( dllexport ) +#else + #define EXPORTAPI +#endif + + +// +// EFIAPI - prototype calling convention for EFI function pointers +// BOOTSERVICE - prototype for implementation of a boot service interface +// RUNTIMESERVICE - prototype for implementation of a runtime service interface +// RUNTIMEFUNCTION - prototype for implementation of a runtime function that is not a service +// RUNTIME_CODE - pragma macro for declaring runtime code +// + +#ifndef EFIAPI // Forces EFI calling conventions reguardless of compiler options + #ifdef _MSC_EXTENSIONS + #define EFIAPI __cdecl // Force C calling convention for Microsoft C compiler + #elif __clang__ || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) + #define EFIAPI __attribute__((__ms_abi__)) // Force Microsoft ABI + #else + #define EFIAPI // Substitute expresion to force C calling convention + #endif +#endif + +#define BOOTSERVICE +//#define RUNTIMESERVICE(proto,a) alloc_text("rtcode",a); proto a +//#define RUNTIMEFUNCTION(proto,a) alloc_text("rtcode",a); proto a +#define RUNTIMESERVICE +#define RUNTIMEFUNCTION + + +#define RUNTIME_CODE(a) alloc_text("rtcode", a) +#define BEGIN_RUNTIME_DATA() data_seg("rtdata") +#define END_RUNTIME_DATA() data_seg("") + +#define VOLATILE volatile + +#define MEMORY_FENCE() + +#ifdef EFI_NT_EMULATOR + +// +// To help ensure proper coding of integrated drivers, they are +// compiled as DLLs. In NT they require a dll init entry pointer. +// The macro puts a stub entry point into the DLL so it will load. +// + +#define EFI_DRIVER_ENTRY_POINT(InitFunction) \ + UINTN \ + __stdcall \ + _DllMainCRTStartup ( \ + UINTN Inst, \ + UINTN reason_for_call, \ + VOID *rserved \ + ) \ + { \ + return 1; \ + } \ + \ + int \ + EXPORTAPI \ + __cdecl \ + InitializeDriver ( \ + void *ImageHandle, \ + void *SystemTable \ + ) \ + { \ + return InitFunction(ImageHandle, SystemTable); \ + } + + + #define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \ + (_if)->LoadInternal(type, name, NULL) + +#else // EFI_NT_EMULATOR + +// +// When build similiar to FW, then link everything together as +// one big module. 
+// + + #define EFI_DRIVER_ENTRY_POINT(InitFunction) \ + UINTN \ + InitializeDriver ( \ + VOID *ImageHandle, \ + VOID *SystemTable \ + ) \ + { \ + return InitFunction(ImageHandle, \ + SystemTable); \ + } \ + \ + EFI_STATUS efi_main( \ + EFI_HANDLE image, \ + EFI_SYSTEM_TABLE *systab \ + ) __attribute__((weak, \ + alias ("InitializeDriver"))); + + #define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \ + (_if)->LoadInternal(type, name, entry) + +#endif // EFI_FW_NT + +// +// Some compilers don't support the forward reference construct: +// typedef struct XXXXX +// +// The following macro provide a workaround for such cases. +// +#ifdef NO_INTERFACE_DECL +#define INTERFACE_DECL(x) +#else +#ifdef __GNUC__ +#define INTERFACE_DECL(x) struct x +#else +#define INTERFACE_DECL(x) typedef struct x +#endif +#endif + +#ifdef _MSC_EXTENSIONS +#pragma warning ( disable : 4731 ) // Suppress warnings about modification of EBP +#endif + diff --git a/xen/arch/x86/include/asm/x86_64/elf.h b/xen/arch/x86/include/asm/x86_64/elf.h new file mode 100644 index 0000000000..00227e0e12 --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/elf.h @@ -0,0 +1,85 @@ +#ifndef __X86_64_ELF_H__ +#define __X86_64_ELF_H__ + +#include +#include + +typedef struct { + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long rbp; + unsigned long rbx; + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long rax; + unsigned long rcx; + unsigned long rdx; + unsigned long rsi; + unsigned long rdi; + unsigned long orig_rax; + unsigned long rip; + unsigned long cs; + unsigned long rflags; + unsigned long rsp; + unsigned long ss; + unsigned long thread_fs; + unsigned long thread_gs; + unsigned long ds; + unsigned long es; + unsigned long fs; + unsigned long gs; +} ELF_Gregset; + +static inline void elf_core_save_regs(ELF_Gregset *core_regs, + crash_xen_core_t *xen_core_regs) +{ + asm ( "movq %%r15, %0" : "=m" (core_regs->r15) ); + asm ( "movq %%r14, %0" : "=m" (core_regs->r14) ); + asm ( "movq %%r13, %0" : "=m" (core_regs->r13) ); + asm ( "movq %%r12, %0" : "=m" (core_regs->r12) ); + asm ( "movq %%rbp, %0" : "=m" (core_regs->rbp) ); + asm ( "movq %%rbx, %0" : "=m" (core_regs->rbx) ); + asm ( "movq %%r11, %0" : "=m" (core_regs->r11) ); + asm ( "movq %%r10, %0" : "=m" (core_regs->r10) ); + asm ( "movq %%r9, %0" : "=m" (core_regs->r9) ); + asm ( "movq %%r8, %0" : "=m" (core_regs->r8) ); + asm ( "movq %%rax, %0" : "=m" (core_regs->rax) ); + asm ( "movq %%rcx, %0" : "=m" (core_regs->rcx) ); + asm ( "movq %%rdx, %0" : "=m" (core_regs->rdx) ); + asm ( "movq %%rsi, %0" : "=m" (core_regs->rsi) ); + asm ( "movq %%rdi, %0" : "=m" (core_regs->rdi) ); + + /* orig_rax not filled in for now */ + asm ( "call 0f; 0: popq %0" : "=m" (core_regs->rip) ); + core_regs->cs = read_sreg(cs); + asm ( "pushfq; popq %0" : "=m" (core_regs->rflags) ); + asm ( "movq %%rsp, %0" : "=m" (core_regs->rsp) ); + core_regs->ss = read_sreg(ss); + rdmsrl(MSR_FS_BASE, core_regs->thread_fs); + rdmsrl(MSR_GS_BASE, core_regs->thread_gs); + core_regs->ds = read_sreg(ds); + core_regs->es = read_sreg(es); + core_regs->fs = read_sreg(fs); + core_regs->gs = read_sreg(gs); + + asm ( "mov %%cr0, %0" : "=r" (xen_core_regs->cr0) ); + asm ( "mov %%cr2, %0" : "=r" (xen_core_regs->cr2) ); + asm ( "mov %%cr3, %0" : "=r" (xen_core_regs->cr3) ); + asm ( "mov %%cr4, %0" : "=r" (xen_core_regs->cr4) ); +} + +#endif /* __X86_64_ELF_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 
+ * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/x86_64/page.h b/xen/arch/x86/include/asm/x86_64/page.h new file mode 100644 index 0000000000..cb1db107c4 --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/page.h @@ -0,0 +1,166 @@ + +#ifndef __X86_64_PAGE_H__ +#define __X86_64_PAGE_H__ + +#define __XEN_VIRT_START XEN_VIRT_START + +#define VADDR_TOP_BIT (1UL << (VADDR_BITS - 1)) +#define CANONICAL_MASK (~0UL & ~VADDR_MASK) + +#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63)) + +#ifndef __ASSEMBLY__ + +static inline unsigned long canonicalise_addr(unsigned long addr) +{ + if ( addr & VADDR_TOP_BIT ) + return addr | CANONICAL_MASK; + else + return addr & ~CANONICAL_MASK; +} + +#include + +#include + +/* + * Note: These are solely for the use by page_{get,set}_owner(), and + * therefore don't need to handle the XEN_VIRT_{START,END} range. + */ +#define virt_to_pdx(va) (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \ + PAGE_SHIFT) +#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \ + ((unsigned long)(pdx) << PAGE_SHIFT))) + +static inline unsigned long __virt_to_maddr(unsigned long va) +{ + ASSERT(va < DIRECTMAP_VIRT_END); + if ( va >= DIRECTMAP_VIRT_START ) + va -= DIRECTMAP_VIRT_START; + else + { + BUILD_BUG_ON(XEN_VIRT_END - XEN_VIRT_START != GB(1)); + /* Signed, so ((long)XEN_VIRT_START >> 30) fits in an imm32. */ + ASSERT(((long)va >> (PAGE_ORDER_1G + PAGE_SHIFT)) == + ((long)XEN_VIRT_START >> (PAGE_ORDER_1G + PAGE_SHIFT))); + + va += xen_phys_start - XEN_VIRT_START; + } + return (va & ma_va_bottom_mask) | + ((va << pfn_pdx_hole_shift) & ma_top_mask); +} + +static inline void *__maddr_to_virt(unsigned long ma) +{ + ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT)); + return (void *)(DIRECTMAP_VIRT_START + + ((ma & ma_va_bottom_mask) | + ((ma & ma_top_mask) >> pfn_pdx_hole_shift))); +} + +/* read access (should only be used for debug printk's) */ +typedef u64 intpte_t; +#define PRIpte "016lx" + +typedef struct { intpte_t l1; } l1_pgentry_t; +typedef struct { intpte_t l2; } l2_pgentry_t; +typedef struct { intpte_t l3; } l3_pgentry_t; +typedef struct { intpte_t l4; } l4_pgentry_t; +typedef l4_pgentry_t root_pgentry_t; + +#endif /* !__ASSEMBLY__ */ + +#define pte_read_atomic(ptep) read_atomic(ptep) +#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte) +#define pte_write(ptep, pte) write_atomic(ptep, pte) + +/* Given a virtual address, get an entry offset into a linear page table. */ +#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT) +#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT) +#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT) +#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT) + +#define is_guest_l2_slot(_d, _t, _s) \ + ( !((_t) & PGT_pae_xen_l2) || \ + ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) ) +#define is_guest_l4_slot(_d, _s) \ + ( is_pv_32bit_domain(_d) \ + ? 
((_s) == 0) \ + : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \ + ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))) + +#define root_table_offset l4_table_offset +#define root_get_pfn l4e_get_pfn +#define root_get_flags l4e_get_flags +#define root_get_intpte l4e_get_intpte +#define root_empty l4e_empty +#define root_from_paddr l4e_from_paddr +#define PGT_root_page_table PGT_l4_page_table + +/* + * PTE pfn and flags: + * 40-bit pfn = (pte[51:12]) + * 24-bit flags = (pte[63:52],pte[11:0]) + */ + +/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */ +#ifndef __ASSEMBLY__ +static inline unsigned int get_pte_flags(intpte_t x) +{ + return ((x >> 40) & ~0xfff) | (x & 0xfff); +} + +static inline intpte_t put_pte_flags(unsigned int x) +{ + return (((intpte_t)x & ~0xfff) << 40) | (x & 0xfff); +} +#endif + +/* + * Protection keys define a new 4-bit protection key field + * (PKEY) in bits 62:59 of leaf entries of the page tables. + * This corresponds to bit 22:19 of a 24-bit flags. + * + * Notice: Bit 22 is used by _PAGE_GNTTAB which is visible to PV guests, + * so Protection keys must be disabled on PV guests. + */ +#define _PAGE_PKEY_BITS (0x780000) /* Protection Keys, 22:19 */ + +#define get_pte_pkey(x) (MASK_EXTR(get_pte_flags(x), _PAGE_PKEY_BITS)) + +/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte.*/ +#define _PAGE_NX_BIT (1U<<23) + +/* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte.*/ +#define _PAGE_GNTTAB (1U<<22) + +/* + * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte. + * This is needed to distinguish between user and kernel PTEs since _PAGE_USER + * is asserted for both. + */ +#define _PAGE_GUEST_KERNEL (1U<<12) + +#define PAGE_HYPERVISOR_RO (__PAGE_HYPERVISOR_RO | _PAGE_GLOBAL) +#define PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RW | _PAGE_GLOBAL) +#define PAGE_HYPERVISOR_RX (__PAGE_HYPERVISOR_RX | _PAGE_GLOBAL) +#define PAGE_HYPERVISOR_RWX (__PAGE_HYPERVISOR | _PAGE_GLOBAL) +#define PAGE_HYPERVISOR_SHSTK (__PAGE_HYPERVISOR_SHSTK | _PAGE_GLOBAL) + +#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW +#define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | \ + _PAGE_GLOBAL | _PAGE_NX) +#define PAGE_HYPERVISOR_UC (__PAGE_HYPERVISOR_UC | \ + _PAGE_GLOBAL | _PAGE_NX) + +#endif /* __X86_64_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/x86_64/regs.h b/xen/arch/x86/include/asm/x86_64/regs.h new file mode 100644 index 0000000000..171cf9a2e2 --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/regs.h @@ -0,0 +1,28 @@ +#ifndef _X86_64_REGS_H +#define _X86_64_REGS_H + +#include +#include + +#define ring_0(r) (((r)->cs & 3) == 0) +#define ring_1(r) (((r)->cs & 3) == 1) +#define ring_2(r) (((r)->cs & 3) == 2) +#define ring_3(r) (((r)->cs & 3) == 3) + +#define guest_kernel_mode(v, r) \ + (!is_pv_32bit_vcpu(v) ? \ + (ring_3(r) && ((v)->arch.flags & TF_kernel_mode)) : \ + (ring_1(r))) + +#define permit_softint(dpl, v, r) \ + ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3)) + +/* Check for null trap callback handler: Is the EIP null? */ +#define null_trap_bounce(v, tb) \ + (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0)) + +/* Number of bytes of on-stack execution state to be context-switched. */ +/* NB. Segment registers and bases are not saved/restored on x86/64 stack. 
*/ +#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es)) + +#endif diff --git a/xen/arch/x86/include/asm/x86_64/system.h b/xen/arch/x86/include/asm/x86_64/system.h new file mode 100644 index 0000000000..e94371cf20 --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/system.h @@ -0,0 +1,62 @@ +#ifndef __X86_64_SYSTEM_H__ +#define __X86_64_SYSTEM_H__ + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ + (unsigned long)(n),sizeof(*(ptr)))) + +/* + * Atomic 16 bytes compare and exchange. Compare OLD with MEM, if + * identical, store NEW in MEM. Return the initial value in MEM. + * Success is indicated by comparing RETURN with OLD. + * + * This function can only be called when cpu_has_cx16 is true. + */ + +static always_inline __uint128_t __cmpxchg16b( + volatile void *ptr, const __uint128_t *oldp, const __uint128_t *newp) +{ + union { + struct { uint64_t lo, hi; }; + __uint128_t raw; + } new = { .raw = *newp }, old = { .raw = *oldp }, prev; + + ASSERT(cpu_has_cx16); + + /* Don't use "=A" here - clang can't deal with that. */ + asm volatile ( "lock cmpxchg16b %[ptr]" + : "=d" (prev.hi), "=a" (prev.lo), + [ptr] "+m" (*(volatile __uint128_t *)ptr) + : "c" (new.hi), "b" (new.lo), "d" (old.hi), "a" (old.lo) ); + + return prev.raw; +} + +static always_inline __uint128_t cmpxchg16b_local_( + void *ptr, const __uint128_t *oldp, const __uint128_t *newp) +{ + union { + struct { uint64_t lo, hi; }; + __uint128_t raw; + } new = { .raw = *newp }, old = { .raw = *oldp }, prev; + + ASSERT(cpu_has_cx16); + + /* Don't use "=A" here - clang can't deal with that. */ + asm volatile ( "cmpxchg16b %[ptr]" + : "=d" (prev.hi), "=a" (prev.lo), + [ptr] "+m" (*(__uint128_t *)ptr) + : "c" (new.hi), "b" (new.lo), "d" (old.hi), "a" (old.lo) ); + + return prev.raw; +} + +#define cmpxchg16b(ptr, o, n) ({ \ + volatile void *_p = (ptr); \ + ASSERT(!((unsigned long)_p & 0xf)); \ + BUILD_BUG_ON(sizeof(*(o)) != sizeof(__uint128_t)); \ + BUILD_BUG_ON(sizeof(*(n)) != sizeof(__uint128_t)); \ + __cmpxchg16b(_p, (void *)(o), (void *)(n)); \ +}) + +#endif /* __X86_64_SYSTEM_H__ */ diff --git a/xen/arch/x86/include/asm/x86_64/uaccess.h b/xen/arch/x86/include/asm/x86_64/uaccess.h new file mode 100644 index 0000000000..ba79f950fb --- /dev/null +++ b/xen/arch/x86/include/asm/x86_64/uaccess.h @@ -0,0 +1,70 @@ +#ifndef __X86_64_UACCESS_H +#define __X86_64_UACCESS_H + +/* + * With CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS (apparent) PV guest accesses + * are prohibited to touch the Xen private VA range. The compat argument + * translation area, therefore, can't live within this range. Domains + * (potentially) in need of argument translation (32-bit PV, possibly HVM) get + * a secondary mapping installed, which needs to be used for such accesses in + * the PV case, and will also be used for HVM to avoid extra conditionals. 
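+ *
+ * Editorial aside, illustrative and not part of the original header: code
+ * that stages a translated argument in this area can recognise such
+ * pointers with is_compat_arg_xlat_range() below, e.g.
+ *
+ *     void *buf = COMPAT_ARG_XLAT_VIRT_BASE;
+ *
+ *     ASSERT(is_compat_arg_xlat_range(buf, 64));
+ *
+ * with 64 standing in for the size of whatever argument was bounced there.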
+ */ +#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current) + \ + (PERDOMAIN_ALT_VIRT_START - \ + PERDOMAIN_VIRT_START)) +#define COMPAT_ARG_XLAT_SIZE (2*PAGE_SIZE) +struct vcpu; +int setup_compat_arg_xlat(struct vcpu *v); +void free_compat_arg_xlat(struct vcpu *v); +#define is_compat_arg_xlat_range(addr, size) ({ \ + unsigned long __off; \ + __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \ + (__off < COMPAT_ARG_XLAT_SIZE) && \ + ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE); \ +}) + +#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE) +#define xlat_page_size COMPAT_ARG_XLAT_SIZE +#define xlat_page_left_size(xlat_page_current) \ + (xlat_page_start + xlat_page_size - xlat_page_current) + +#define xlat_malloc_init(xlat_page_current) do { \ + xlat_page_current = xlat_page_start; \ +} while (0) + +extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size); + +#define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&_p, sizeof(_t) * _c)) + +/* + * Valid if in +ve half of 48-bit address space, or above Xen-reserved area. + * This is also valid for range checks (addr, addr+size). As long as the + * start address is outside the Xen-reserved area, sequential accesses + * (starting at addr) will hit a non-canonical address (and thus fault) + * before ever reaching VIRT_START. + */ +#define __addr_ok(addr) \ + (((unsigned long)(addr) < (1UL<<47)) || \ + ((unsigned long)(addr) >= HYPERVISOR_VIRT_END)) + +#define access_ok(addr, size) \ + (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size)) + +#define array_access_ok(addr, count, size) \ + (likely(((count) ?: 0UL) < (~0UL / (size))) && \ + access_ok(addr, (count) * (size))) + +#define __compat_addr_ok(d, addr) \ + ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d)) + +#define __compat_access_ok(d, addr, size) \ + __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0)) + +#define compat_access_ok(addr, size) \ + __compat_access_ok(current->domain, addr, size) + +#define compat_array_access_ok(addr,count,size) \ + (likely((count) < (~0U / (size))) && \ + compat_access_ok(addr, 0 + (count) * (size))) + +#endif /* __X86_64_UACCESS_H */ diff --git a/xen/arch/x86/include/asm/x86_emulate.h b/xen/arch/x86/include/asm/x86_emulate.h new file mode 100644 index 0000000000..c184c0053c --- /dev/null +++ b/xen/arch/x86/include/asm/x86_emulate.h @@ -0,0 +1,21 @@ +/****************************************************************************** + * x86_emulate.h + * + * Wrapper for generic x86 instruction decoder and emulator. + * + * Copyright (c) 2008, Citrix Systems, Inc. + * + * Authors: + * Keir Fraser + */ + +#ifndef __ASM_X86_X86_EMULATE_H__ +#define __ASM_X86_X86_EMULATE_H__ + +#include +#include +#include + +#include "../../x86_emulate/x86_emulate.h" + +#endif /* __ASM_X86_X86_EMULATE_H__ */ diff --git a/xen/arch/x86/include/asm/xenoprof.h b/xen/arch/x86/include/asm/xenoprof.h new file mode 100644 index 0000000000..cf6af8c5df --- /dev/null +++ b/xen/arch/x86/include/asm/xenoprof.h @@ -0,0 +1,107 @@ +/****************************************************************************** + * asm-x86/xenoprof.h + * xenoprof x86 arch specific header file + * + * Copyright (c) 2006 Isaku Yamahata + * VA Linux Systems Japan K.K. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; If not, see . + */ + +#ifndef __ASM_X86_XENOPROF_H__ +#define __ASM_X86_XENOPROF_H__ + +struct vcpu; + +#ifdef CONFIG_XENOPROF + +#include + +int nmi_reserve_counters(void); +int nmi_setup_events(void); +int nmi_enable_virq(void); +int nmi_start(void); +void nmi_stop(void); +void nmi_disable_virq(void); +void nmi_release_counters(void); + +int xenoprof_arch_init(int *num_events, char *cpu_type); +#define xenoprof_arch_reserve_counters() nmi_reserve_counters() +#define xenoprof_arch_setup_events() nmi_setup_events() +#define xenoprof_arch_enable_virq() nmi_enable_virq() +#define xenoprof_arch_start() nmi_start() +#define xenoprof_arch_stop() nmi_stop() +#define xenoprof_arch_disable_virq() nmi_disable_virq() +#define xenoprof_arch_release_counters() nmi_release_counters() + +int xenoprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg); +int compat_oprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg); +int xenoprof_arch_ibs_counter(XEN_GUEST_HANDLE_PARAM(void) arg); + +struct cpu_user_regs; + +/* AMD IBS support */ +void ibs_init(void); +extern u32 ibs_caps; + +int xenoprofile_get_mode(struct vcpu *, const struct cpu_user_regs *); + +static inline int xenoprof_backtrace_supported(void) +{ + return 1; +} + +void xenoprof_backtrace(struct vcpu *, const struct cpu_user_regs *, + unsigned long depth, int mode); + +int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content); +int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content); +void passive_domain_destroy(struct vcpu *v); + +bool nmi_oprofile_send_virq(void); + +#else + +static inline int passive_domain_do_rdmsr(unsigned int msr, + uint64_t *msr_content) +{ + return 0; +} + +static inline int passive_domain_do_wrmsr(unsigned int msr, + uint64_t msr_content) +{ + return 0; +} + +static inline void passive_domain_destroy(struct vcpu *v) {} + +static inline bool nmi_oprofile_send_virq(void) +{ + return false; +} + +#endif /* CONFIG_XENOPROF */ + +#endif /* __ASM_X86_XENOPROF_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/x86/include/asm/xstate.h b/xen/arch/x86/include/asm/xstate.h new file mode 100644 index 0000000000..7ab0bdde89 --- /dev/null +++ b/xen/arch/x86/include/asm/xstate.h @@ -0,0 +1,141 @@ +/* + * include/asm-i386/xstate.h + * + * x86 extended state (xsave/xrstor) related definitions + * + */ + +#ifndef __ASM_XSTATE_H +#define __ASM_XSTATE_H + +#include +#include +#include + +#define FCW_DEFAULT 0x037f +#define FCW_RESET 0x0040 +#define MXCSR_DEFAULT 0x1f80 + +extern uint32_t mxcsr_mask; + +#define XSTATE_CPUID 0x0000000d + +#define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */ + +#define XSAVE_HDR_SIZE 64 +#define XSAVE_SSE_OFFSET 160 +#define XSTATE_YMM_SIZE 256 +#define FXSAVE_SIZE 512 +#define XSAVE_HDR_OFFSET FXSAVE_SIZE +#define XSTATE_AREA_MIN_SIZE (FXSAVE_SIZE + 
XSAVE_HDR_SIZE) + +#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE) +#define XCNTXT_MASK (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \ + X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \ + XSTATE_NONLAZY) + +#define XSTATE_ALL (~(1ULL << 63)) +#define XSTATE_NONLAZY (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU) +#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY) +#define XSTATE_XSAVES_ONLY 0 +#define XSTATE_COMPACTION_ENABLED (1ULL << 63) + +#define XSTATE_ALIGN64 (1U << 1) + +extern u64 xfeature_mask; +extern u64 xstate_align; +extern unsigned int *xstate_offsets; +extern unsigned int *xstate_sizes; + +/* extended state save area */ +struct __attribute__((aligned (64))) xsave_struct +{ + union __attribute__((aligned(16))) { /* FPU/MMX, SSE */ + char x[512]; + struct { + uint16_t fcw; + uint16_t fsw; + uint8_t ftw; + uint8_t rsvd1; + uint16_t fop; + union { + uint64_t addr; + struct { + uint32_t offs; + uint16_t sel; + uint16_t rsvd; + }; + } fip, fdp; + uint32_t mxcsr; + uint32_t mxcsr_mask; + /* data registers follow here */ + }; + } fpu_sse; + + struct xsave_hdr { + u64 xstate_bv; + u64 xcomp_bv; + u64 reserved[6]; + } xsave_hdr; /* The 64-byte header */ + + char data[]; /* Variable layout states */ +}; + +struct xstate_bndcsr { + uint64_t bndcfgu; + uint64_t bndstatus; +}; + +/* extended state operations */ +bool __must_check set_xcr0(u64 xfeatures); +uint64_t get_xcr0(void); +void set_msr_xss(u64 xss); +uint64_t get_msr_xss(void); +uint64_t read_bndcfgu(void); +void xsave(struct vcpu *v, uint64_t mask); +void xrstor(struct vcpu *v, uint64_t mask); +void xstate_set_init(uint64_t mask); +bool xsave_enabled(const struct vcpu *v); +int __must_check validate_xstate(const struct domain *d, + uint64_t xcr0, uint64_t xcr0_accum, + const struct xsave_hdr *hdr); +int __must_check handle_xsetbv(u32 index, u64 new_bv); +void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size); +void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size); + +/* extended state init and cleanup functions */ +void xstate_free_save_area(struct vcpu *v); +int xstate_alloc_save_area(struct vcpu *v); +void xstate_init(struct cpuinfo_x86 *c); +unsigned int xstate_ctxt_size(u64 xcr0); + +static inline uint64_t xgetbv(unsigned int index) +{ + uint32_t lo, hi; + + ASSERT(index); /* get_xcr0() should be used instead. */ + asm volatile ( ".byte 0x0f,0x01,0xd0" /* xgetbv */ + : "=a" (lo), "=d" (hi) : "c" (index) ); + + return lo | ((uint64_t)hi << 32); +} + +static inline bool xstate_all(const struct vcpu *v) +{ + /* + * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE + * (in the legacy region of xsave area) are fixed, so saving + * XSTATE_FP_SSE will not cause overwriting problem with XSAVES/XSAVEC. + */ + return (v->arch.xsave_area->xsave_hdr.xcomp_bv & + XSTATE_COMPACTION_ENABLED) && + (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE); +} + +static inline bool __nonnull(1) +xsave_area_compressed(const struct xsave_struct *xsave_area) +{ + return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED; +} + +#endif /* __ASM_XSTATE_H */ diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c index fcd5a9cada..e3ce85d118 100644 --- a/xen/common/efi/runtime.c +++ b/xen/common/efi/runtime.c @@ -12,7 +12,7 @@ struct efi_rs_state { #ifdef CONFIG_X86 /* * The way stacks get set up leads to them always being on an 8-byte - * boundary not evenly divisible by 16 (see asm-x86/current.h). 
The EFI ABI, + * boundary not evenly divisible by 16 (see asm/current.h). The EFI ABI, * just like the CPU one, however requires stacks to be 16-byte aligned * before every function call. Since the compiler assumes this (unless * passing it -mpreferred-stack-boundary=3), it wouldn't generate code to diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index d0baaa2ecd..653f1ab09f 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -245,7 +245,7 @@ PAGE_LIST_HEAD(page_broken_list); /* * first_valid_mfn is exported because it is use in ARM specific NUMA - * helpers. See comment in asm-arm/numa.h. + * helpers. See comment in arch/arm/include/asm/numa.h. */ mfn_t first_valid_mfn = INVALID_MFN_INITIALIZER; diff --git a/xen/include/asm-arm/acpi.h b/xen/include/asm-arm/acpi.h deleted file mode 100644 index e53973e054..0000000000 --- a/xen/include/asm-arm/acpi.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2015, Shannon Zhao - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#ifndef _ASM_ARM_ACPI_H -#define _ASM_ARM_ACPI_H - -#include - -#define COMPILER_DEPENDENT_INT64 long long -#define COMPILER_DEPENDENT_UINT64 unsigned long long -#define ACPI_MAP_MEM_ATTR PAGE_HYPERVISOR - -/* Tables marked as reserved in efi table */ -typedef enum { - TBL_FADT, - TBL_MADT, - TBL_STAO, - TBL_XSDT, - TBL_RSDP, - TBL_EFIT, - TBL_MMAP, - TBL_MMAX, -} EFI_MEM_RES; - -bool acpi_psci_present(void); -bool acpi_psci_hvc_present(void); -void acpi_smp_init_cpus(void); - -/* - * This function returns the offset of a given ACPI/EFI table in the allocated - * memory region. Currently, the tables should be created in the same order as - * their associated 'index' in the enum EFI_MEM_RES. This means the function - * won't return the correct offset until all the tables before a given 'index' - * are created. - */ -paddr_t acpi_get_table_offset(struct membank tbl_add[], EFI_MEM_RES index); - -/* Macros for consistency checks of the GICC subtable of MADT */ -#define ACPI_MADT_GICC_LENGTH \ - (acpi_gbl_FADT.header.revision < 6 ? 
76 : 80) - -#define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ - (entry)->header.length != ACPI_MADT_GICC_LENGTH) - -#ifdef CONFIG_ACPI -extern bool acpi_disabled; -/* Basic configuration for ACPI */ -static inline void disable_acpi(void) -{ - acpi_disabled = true; -} - -static inline void enable_acpi(void) -{ - acpi_disabled = false; -} -#else -#define acpi_disabled (true) -#define disable_acpi() -#define enable_acpi() -#endif - -#endif /*_ASM_ARM_ACPI_H*/ diff --git a/xen/include/asm-arm/alternative.h b/xen/include/asm-arm/alternative.h deleted file mode 100644 index 1eb4b60fbb..0000000000 --- a/xen/include/asm-arm/alternative.h +++ /dev/null @@ -1,221 +0,0 @@ -#ifndef __ASM_ALTERNATIVE_H -#define __ASM_ALTERNATIVE_H - -#include -#include - -#define ARM_CB_PATCH ARM_NCAPS - -#ifndef __ASSEMBLY__ - -#include -#include - -struct alt_instr { - s32 orig_offset; /* offset to original instruction */ - s32 alt_offset; /* offset to replacement instruction */ - u16 cpufeature; /* cpufeature bit set for replacement */ - u8 orig_len; /* size of original instruction(s) */ - u8 alt_len; /* size of new instruction(s), <= orig_len */ -}; - -/* Xen: helpers used by common code. */ -#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f) -#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) -#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) - -typedef void (*alternative_cb_t)(const struct alt_instr *alt, - const uint32_t *origptr, uint32_t *updptr, - int nr_inst); - -void apply_alternatives_all(void); -int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end); - -#define ALTINSTR_ENTRY(feature, cb) \ - " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ - " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ - " .word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ - " .hword " __stringify(feature) "\n" /* feature bit */ \ - " .byte 662b-661b\n" /* source len */ \ - " .byte 664f-663f\n" /* replacement len */ - -/* - * alternative assembly primitive: - * - * If any of these .org directive fail, it means that insn1 and insn2 - * don't have the same length. This used to be written as - * - * .if ((664b-663b) != (662b-661b)) - * .error "Alternatives instruction length mismatch" - * .endif - * - * but most assemblers die if insn1 or insn2 have a .inst. This should - * be fixed in a binutils release posterior to 2.25.51.0.2 (anything - * containing commit 4e4d08cf7399b606 or c1baaddf8861). - * - * Alternatives with callbacks do not generate replacement instructions. - */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ - ".if "__stringify(cfg_enabled)" == 1\n" \ - "661:\n\t" \ - oldinstr "\n" \ - "662:\n" \ - ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ - ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ - ".pushsection .altinstr_replacement, \"ax\"\n" \ - "663:\n\t" \ - newinstr "\n" \ - "664:\n\t" \ - ".popsection\n\t" \ - ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ - "663:\n\t" \ - "664:\n\t" \ - ".endif\n" \ - ".endif\n" - -#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) 
\ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) - -#define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM_CB_PATCH, 1, cb) -#else - -#include -#include - -.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len - .word \orig_offset - . - .word \alt_offset - . - .hword \feature - .byte \orig_len - .byte \alt_len -.endm - -.macro alternative_insn insn1, insn2, cap, enable = 1 - .if \enable -661: \insn1 -662: .pushsection .altinstructions, "a" - altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f - .popsection - .pushsection .altinstr_replacement, "ax" -663: \insn2 -664: .popsection - .org . - (664b-663b) + (662b-661b) - .org . - (662b-661b) + (664b-663b) - .endif -.endm - -/* - * Alternative sequences - * - * The code for the case where the capability is not present will be - * assembled and linked as normal. There are no restrictions on this - * code. - * - * The code for the case where the capability is present will be - * assembled into a special section to be used for dynamic patching. - * Code for that case must: - * - * 1. Be exactly the same length (in bytes) as the default code - * sequence. - * - * 2. Not contain a branch target that is used outside of the - * alternative sequence it is defined in (branches into an - * alternative sequence are not fixed up). - */ - -/* - * Begin an alternative code sequence. - */ -.macro alternative_if_not cap - .set .Lasm_alt_mode, 0 - .pushsection .altinstructions, "a" - altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f - .popsection -661: -.endm - -.macro alternative_if cap - .set .Lasm_alt_mode, 1 - .pushsection .altinstructions, "a" - altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f - .popsection - .pushsection .altinstr_replacement, "ax" - .align 2 /* So GAS knows label 661 is suitably aligned */ -661: -.endm - -/* - * Provide the other half of the alternative code sequence. - */ -.macro alternative_else -662: - .if .Lasm_alt_mode==0 - .pushsection .altinstr_replacement, "ax" - .else - .popsection - .endif -663: -.endm - -.macro alternative_cb cb - .set .Lasm_alt_mode, 0 - .pushsection .altinstructions, "a" - altinstruction_entry 661f, \cb, ARM_CB_PATCH, 662f-661f, 0 - .popsection -661: -.endm - -/* - * Complete an alternative code sequence. - */ -.macro alternative_endif -664: - .if .Lasm_alt_mode==0 - .popsection - .endif - .org . - (664b-663b) + (662b-661b) - .org . - (662b-661b) + (664b-663b) -.endm - -/* - * Provides a trivial alternative or default sequence consisting solely - * of NOPs. The number of NOPs is chosen automatically to match the - * previous case. - */ -.macro alternative_else_nop_endif -alternative_else - nops (662b-661b) / ARCH_PATCH_INSN_SIZE -alternative_endif -.endm - -/* - * Callback-based alternative epilogue - */ -.macro alternative_cb_end -662: -.endm - -#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ - alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) - -#endif /* __ASSEMBLY__ */ - -/* - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); - * - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); - * N.B. If CONFIG_FOO is specified, but not selected, the whole block - * will be omitted, including oldinstr. - */ -#define ALTERNATIVE(oldinstr, newinstr, ...) 
\ - _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) - -#endif /* __ASM_ALTERNATIVE_H */ diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h deleted file mode 100644 index df50cb2f09..0000000000 --- a/xen/include/asm-arm/altp2m.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Alternate p2m - * - * Copyright (c) 2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_ARM_ALTP2M_H -#define __ASM_ARM_ALTP2M_H - -#include - -/* Alternate p2m on/off per domain */ -static inline bool altp2m_active(const struct domain *d) -{ - /* Not implemented on ARM. */ - return false; -} - -/* Alternate p2m VCPU */ -static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v) -{ - /* Not implemented on ARM, should not be reached. */ - BUG(); - return 0; -} - -#endif /* __ASM_ARM_ALTP2M_H */ diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h deleted file mode 100644 index 2832a72792..0000000000 --- a/xen/include/asm-arm/arm32/atomic.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * arch/arm/include/asm/atomic.h - * - * Copyright (C) 1996 Russell King. - * Copyright (C) 2002 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ARCH_ARM_ARM32_ATOMIC__ -#define __ARCH_ARM_ARM32_ATOMIC__ - -/* - * ARMv6 UP and SMP safe atomic ops. We use load exclusive and - * store exclusive to ensure that these are atomic. We may loop - * to ensure that the update happens. 
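- *
- * Editorial aside, illustrative and not from the original file: each helper
- * below is a load-exclusive/store-exclusive rendering of a plain
- * read-modify-write. atomic_add(i, v), for instance, behaves like
- *
- *     int tmp = v->counter;   // ldrex
- *     tmp += i;               // add
- *     v->counter = tmp;       // strex
- *
- * with the whole sequence retried whenever the exclusive monitor reports
- * that another agent wrote the location in between.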
- */ -static inline void atomic_add(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - prefetchw(&v->counter); - __asm__ __volatile__("@ atomic_add\n" -"1: ldrex %0, [%3]\n" -" add %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); -} - -static inline int atomic_add_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - smp_mb(); - prefetchw(&v->counter); - - __asm__ __volatile__("@ atomic_add_return\n" -"1: ldrex %0, [%3]\n" -" add %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); - - smp_mb(); - - return result; -} - -static inline void atomic_sub(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - prefetchw(&v->counter); - __asm__ __volatile__("@ atomic_sub\n" -"1: ldrex %0, [%3]\n" -" sub %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); -} - -static inline int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - smp_mb(); - prefetchw(&v->counter); - - __asm__ __volatile__("@ atomic_sub_return\n" -"1: ldrex %0, [%3]\n" -" sub %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); - - smp_mb(); - - return result; -} - -static inline void atomic_and(int m, atomic_t *v) -{ - unsigned long tmp; - int result; - - prefetchw(&v->counter); - __asm__ __volatile__("@ atomic_and\n" -"1: ldrex %0, [%3]\n" -" and %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (m) - : "cc"); -} - -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) -{ - int oldval; - unsigned long res; - - smp_mb(); - prefetchw(&ptr->counter); - - do { - __asm__ __volatile__("@ atomic_cmpxchg\n" - "ldrex %1, [%3]\n" - "mov %0, #0\n" - "teq %1, %4\n" - "strexeq %0, %5, [%3]\n" - : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); - } while (res); - - smp_mb(); - - return oldval; -} - -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int oldval, newval; - unsigned long tmp; - - smp_mb(); - prefetchw(&v->counter); - - __asm__ __volatile__ ("@ atomic_add_unless\n" -"1: ldrex %0, [%4]\n" -" teq %0, %5\n" -" beq 2f\n" -" add %1, %0, %6\n" -" strex %2, %1, [%4]\n" -" teq %2, #0\n" -" bne 1b\n" -"2:" - : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "r" (u), "r" (a) - : "cc"); - - if (oldval != u) - smp_mb(); - - return oldval; -} - -#endif /* __ARCH_ARM_ARM32_ATOMIC__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff --git a/xen/include/asm-arm/arm32/bitops.h b/xen/include/asm-arm/arm32/bitops.h deleted file mode 100644 index 57938a5874..0000000000 --- a/xen/include/asm-arm/arm32/bitops.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ARM_ARM32_BITOPS_H -#define _ARM_ARM32_BITOPS_H - -#define flsl fls - -/* - * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 
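- *
- * Editorial aside (worked example, not from the original file): with this
- * numbering, bit nr lives in byte (nr / 8), bit (nr % 8), so nr = 10 is
- * byte 1, bit 2. Under the big endian numbering below (nr = 0 -> byte 3
- * bit 0), the same nr = 10 is byte 2, bit 2 of each 32-bit word.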
- */ -extern int _find_first_zero_bit_le(const void * p, unsigned size); -extern int _find_next_zero_bit_le(const void * p, int size, int offset); -extern int _find_first_bit_le(const unsigned long *p, unsigned size); -extern int _find_next_bit_le(const unsigned long *p, int size, int offset); - -/* - * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. - */ -extern int _find_first_zero_bit_be(const void * p, unsigned size); -extern int _find_next_zero_bit_be(const void * p, int size, int offset); -extern int _find_first_bit_be(const unsigned long *p, unsigned size); -extern int _find_next_bit_be(const unsigned long *p, int size, int offset); - -#ifndef __ARMEB__ -/* - * These are the little endian, atomic definitions. - */ -#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) -#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) -#define find_first_bit(p,sz) _find_first_bit_le(p,sz) -#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) - -#else -/* - * These are the big endian, atomic definitions. - */ -#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) -#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) -#define find_first_bit(p,sz) _find_first_bit_be(p,sz) -#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) - -#endif - -#endif /* _ARM_ARM32_BITOPS_H */ diff --git a/xen/include/asm-arm/arm32/bug.h b/xen/include/asm-arm/arm32/bug.h deleted file mode 100644 index 25cce151dc..0000000000 --- a/xen/include/asm-arm/arm32/bug.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ARM_ARM32_BUG_H__ -#define __ARM_ARM32_BUG_H__ - -#include - -/* ARMv7 provides a list of undefined opcode (see A8.8.247 DDI 0406C.b) - * Use one them encoding A1 to go in exception mode - */ -#define BUG_OPCODE 0xe7f000f0 - -#define BUG_INSTR ".word " __stringify(BUG_OPCODE) - -#define BUG_FN_REG r0 - -#endif /* __ARM_ARM32_BUG_H__ */ diff --git a/xen/include/asm-arm/arm32/cmpxchg.h b/xen/include/asm-arm/arm32/cmpxchg.h deleted file mode 100644 index b0bd1d8b68..0000000000 --- a/xen/include/asm-arm/arm32/cmpxchg.h +++ /dev/null @@ -1,229 +0,0 @@ -#ifndef __ASM_ARM32_CMPXCHG_H -#define __ASM_ARM32_CMPXCHG_H - -#include - -extern void __bad_xchg(volatile void *, int); - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - unsigned long ret; - unsigned int tmp; - - smp_mb(); - prefetchw((const void *)ptr); - - switch (size) { - case 1: - asm volatile("@ __xchg1\n" - "1: ldrexb %0, [%3]\n" - " strexb %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - "1: ldrex %0, [%3]\n" - " strex %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - smp_mb(); - - return ret; -} - -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. 
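- *
- * Editorial aside, illustrative and not from the original file: the usual
- * caller pattern recomputes and retries until the exchange sees the value
- * the update was derived from, x being a stand-in for some shared
- * unsigned long:
- *
- *     unsigned long old, prev;
- *
- *     do {
- *         old = x;                          // sample current value
- *         prev = cmpxchg(&x, old, old + 1); // attempt to publish update
- *     } while ( prev != old );              // another CPU raced: redo
- *
- * A mismatch means the value changed between the sample and the exchange,
- * so the new value is recomputed from the fresh old one.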
- */ - -extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); - -#define __CMPXCHG_CASE(sz, name) \ -static inline bool __cmpxchg_case_##name(volatile void *ptr, \ - unsigned long *old, \ - unsigned long new, \ - bool timeout, \ - unsigned int max_try) \ -{ \ - unsigned long oldval; \ - unsigned long res; \ - \ - do { \ - asm volatile("@ __cmpxchg_case_" #name "\n" \ - " ldrex" #sz " %1, [%2]\n" \ - " mov %0, #0\n" \ - " teq %1, %3\n" \ - " strex" #sz "eq %0, %4, [%2]\n" \ - : "=&r" (res), "=&r" (oldval) \ - : "r" (ptr), "Ir" (*old), "r" (new) \ - : "memory", "cc"); \ - \ - if (!res) \ - break; \ - } while (!timeout || ((--max_try) > 0)); \ - \ - *old = oldval; \ - \ - return !res; \ -} - -__CMPXCHG_CASE(b, 1) -__CMPXCHG_CASE(h, 2) -__CMPXCHG_CASE( , 4) - -static inline bool __cmpxchg_case_8(volatile uint64_t *ptr, - uint64_t *old, - uint64_t new, - bool timeout, - unsigned int max_try) -{ - uint64_t oldval; - uint64_t res; - - do { - asm volatile( - " ldrexd %1, %H1, [%3]\n" - " teq %1, %4\n" - " teqeq %H1, %H4\n" - " movne %0, #0\n" - " movne %H0, #0\n" - " bne 2f\n" - " strexd %0, %5, %H5, [%3]\n" - "2:" - : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) - : "r" (ptr), "r" (*old), "r" (new) - : "memory", "cc"); - if (!res) - break; - } while (!timeout || ((--max_try) > 0)); - - *old = oldval; - - return !res; -} - -static always_inline bool __int_cmpxchg(volatile void *ptr, unsigned long *old, - unsigned long new, int size, - bool timeout, unsigned int max_try) -{ - prefetchw((const void *)ptr); - - switch (size) { - case 1: - return __cmpxchg_case_1(ptr, old, new, timeout, max_try); - case 2: - return __cmpxchg_case_2(ptr, old, new, timeout, max_try); - case 4: - return __cmpxchg_case_4(ptr, old, new, timeout, max_try); - default: - return __bad_cmpxchg(ptr, size); - } - - ASSERT_UNREACHABLE(); -} - -static always_inline unsigned long __cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, - int size) -{ - smp_mb(); - if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) - ASSERT_UNREACHABLE(); - smp_mb(); - - return old; -} - -/* - * The helper may fail to update the memory if the action takes too long. - * - * @old: On call the value pointed contains the expected old value. It will be - * updated to the actual old value. - * @max_try: Maximum number of iterations - * - * The helper will return true when the update has succeeded (i.e no - * timeout) and false if the update has failed. - */ -static always_inline bool __cmpxchg_timeout(volatile void *ptr, - unsigned long *old, - unsigned long new, - int size, - unsigned int max_try) -{ - bool ret; - - smp_mb(); - ret = __int_cmpxchg(ptr, old, new, size, true, max_try); - smp_mb(); - - return ret; -} - -/* - * The helper may fail to update the memory if the action takes too long. - * - * @old: On call the value pointed contains the expected old value. It will be - * updated to the actual old value. - * @max_try: Maximum number of iterations - * - * The helper will return true when the update has succeeded (i.e no - * timeout) and false if the update has failed. 
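- *
- * Editorial aside, illustrative and not from the original file: a caller
- * that must not spin indefinitely, e.g. on memory shared with a possibly
- * misbehaving guest, would bound the retries:
- *
- *     uint64_t old = *p;
- *
- *     if ( !__cmpxchg64_timeout(p, &old, new, 16) )
- *         return -EBUSY;  // old now holds the last value observed
- *
- * Here p, new and the retry bound of 16 are stand-ins; contrast with
- * cmpxchg64() below, which loops until it succeeds.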
- */ -static always_inline bool __cmpxchg64_timeout(volatile uint64_t *ptr, - uint64_t *old, - uint64_t new, - unsigned int max_try) -{ - bool ret; - - smp_mb(); - ret = __cmpxchg_case_8(ptr, old, new, true, max_try); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -static inline uint64_t cmpxchg64(volatile uint64_t *ptr, - uint64_t old, - uint64_t new) -{ - smp_mb(); - if (!__cmpxchg_case_8(ptr, &old, new, false, 0)) - ASSERT_UNREACHABLE(); - smp_mb(); - - return old; -} - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h deleted file mode 100644 index 9085e65011..0000000000 --- a/xen/include/asm-arm/arm32/flushtlb.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__ -#define __ASM_ARM_ARM32_FLUSHTLB_H__ - -/* - * Every invalidation operation use the following patterns: - * - * DSB ISHST // Ensure prior page-tables updates have completed - * TLBI... // Invalidate the TLB - * DSB ISH // Ensure the TLB invalidation has completed - * ISB // See explanation below - * - * For Xen page-tables the ISB will discard any instructions fetched - * from the old mappings. - * - * For the Stage-2 page-tables the ISB ensures the completion of the DSB - * (and therefore the TLB invalidation) before continuing. So we know - * the TLBs cannot contain an entry for a mapping we may have removed. - */ -#define TLB_HELPER(name, tlbop) \ -static inline void name(void) \ -{ \ - dsb(ishst); \ - WRITE_CP32(0, tlbop); \ - dsb(ish); \ - isb(); \ -} - -/* Flush local TLBs, current VMID only */ -TLB_HELPER(flush_guest_tlb_local, TLBIALL); - -/* Flush inner shareable TLBs, current VMID only */ -TLB_HELPER(flush_guest_tlb, TLBIALLIS); - -/* Flush local TLBs, all VMIDs, non-hypervisor mode */ -TLB_HELPER(flush_all_guests_tlb_local, TLBIALLNSNH); - -/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */ -TLB_HELPER(flush_all_guests_tlb, TLBIALLNSNHIS); - -/* Flush all hypervisor mappings from the TLB of the local processor. */ -TLB_HELPER(flush_xen_tlb_local, TLBIALLH); - -/* Flush TLB of local processor for address va. */ -static inline void __flush_xen_tlb_one_local(vaddr_t va) -{ - asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory"); -} - -/* Flush TLB of all processors in the inner-shareable domain for address va. */ -static inline void __flush_xen_tlb_one(vaddr_t va) -{ - asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory"); -} - -#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/insn.h b/xen/include/asm-arm/arm32/insn.h deleted file mode 100644 index c800cbfff5..0000000000 --- a/xen/include/asm-arm/arm32/insn.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (C) 2017 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ARCH_ARM_ARM32_INSN
-#define __ARCH_ARM_ARM32_INSN
-
-#include 
-
-int32_t aarch32_get_branch_offset(uint32_t insn);
-uint32_t aarch32_set_branch_offset(uint32_t insn, int32_t offset);
-
-/* Wrapper for common code */
-static inline bool insn_is_branch_imm(uint32_t insn)
-{
-    /*
-     * Xen only uses the ARM execution state on the ARM32 platform. So the
-     * Thumb branch instructions (CBZ, CBNZ, TBB and TBH) will not be used
-     * in Xen. The remaining ARM32 branch instructions are BX, BLX, BL and B.
-     * BX takes a register as its parameter, so we don't need to rewrite it.
-     * Hence we only need to check the BLX, BL and B encodings in this
-     * function.
-     *
-     * From ARM DDI 0406C.c Section A8.8.18 and A8.8.25, we can see these
-     * three branch instructions' encodings:
-     * - b      cccc1010xxxxxxxxxxxxxxxxxxxxxxxx
-     * - bl     cccc1011xxxxxxxxxxxxxxxxxxxxxxxx
-     * - blx    1111101Hxxxxxxxxxxxxxxxxxxxxxxxx
-     *
-     * The H bit of blx can be 0 or 1, depending on the instruction set of
-     * the target instruction. Regardless, if we mask the conditional bits
-     * and bit 24 (the H bit of blx), all of the above branch instructions
-     * have the same value, 0x0A000000.
-     *
-     * And from ARM DDI 0406C.c Section A5.7 Table A5-23, we can see that
-     * blx is the only unconditional instruction with the same value as the
-     * conditional branch instructions. So masking the conditional bits will
-     * not make any other unconditional instruction hit this check.
-     */
-    return ( (insn & 0x0E000000) == 0x0A000000 );
-}
-
-static inline int32_t insn_get_branch_offset(uint32_t insn)
-{
-    return aarch32_get_branch_offset(insn);
-}
-
-static inline uint32_t insn_set_branch_offset(uint32_t insn, int32_t offset)
-{
-    return aarch32_set_branch_offset(insn, offset);
-}
-
-#endif /* !__ARCH_ARM_ARM32_INSN */
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-arm/arm32/io.h b/xen/include/asm-arm/arm32/io.h
deleted file mode 100644
index 73a879e9fb..0000000000
--- a/xen/include/asm-arm/arm32/io.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Based on linux arch/arm/include/asm/io.h
- *
- * Copyright (C) 1996-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Modifications:
- *  16-Sep-1996  RMK  Inlined the inx/outx functions & optimised for both
- *                    constant addresses and variable addresses.
- *  04-Dec-1997  RMK  Moved a lot of this stuff to the new architecture
- *                    specific IO header files.
- *  27-Mar-1999  PJB  Second parameter of memcpy_toio is const..
- *  04-Apr-1999  PJB  Added check_signature.
- * 12-Dec-1999 RMK More cleanups - * 18-Jun-2000 RMK Removed virt_to_* and friends definitions - * 05-Oct-2004 BJD Moved memory string functions to use void __iomem - */ -#ifndef _ARM_ARM32_IO_H -#define _ARM_ARM32_IO_H - -#include -#include - -static inline void __raw_writeb(u8 val, volatile void __iomem *addr) -{ - asm volatile("strb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr) - : "r" (val)); -} - -static inline void __raw_writew(u16 val, volatile void __iomem *addr) -{ - asm volatile("strh %1, %0" - : "+Q" (*(volatile u16 __force *)addr) - : "r" (val)); -} - -static inline void __raw_writel(u32 val, volatile void __iomem *addr) -{ - asm volatile("str %1, %0" - : "+Qo" (*(volatile u32 __force *)addr) - : "r" (val)); -} - -static inline u8 __raw_readb(const volatile void __iomem *addr) -{ - u8 val; - asm volatile("ldrb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr), - "=r" (val)); - return val; -} - -static inline u16 __raw_readw(const volatile void __iomem *addr) -{ - u16 val; - asm volatile("ldrh %1, %0" - : "+Q" (*(volatile u16 __force *)addr), - "=r" (val)); - return val; -} - -static inline u32 __raw_readl(const volatile void __iomem *addr) -{ - u32 val; - asm volatile("ldr %1, %0" - : "+Qo" (*(volatile u32 __force *)addr), - "=r" (val)); - return val; -} - -#define __iormb() rmb() -#define __iowmb() wmb() - -#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) -#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ - __raw_readw(c)); __r; }) -#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ - __raw_readl(c)); __r; }) - -#define writeb_relaxed(v,c) __raw_writeb(v,c) -#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) -#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) - -#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) -#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) -#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) - -#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) -#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) -#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) - -#endif /* _ARM_ARM32_IO_H */ diff --git a/xen/include/asm-arm/arm32/macros.h b/xen/include/asm-arm/arm32/macros.h deleted file mode 100644 index a4e20aa520..0000000000 --- a/xen/include/asm-arm/arm32/macros.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_ARM_ARM32_MACROS_H -#define __ASM_ARM_ARM32_MACROS_H - - .macro ret - mov pc, lr - .endm - -#endif /* __ASM_ARM_ARM32_MACROS_H */ diff --git a/xen/include/asm-arm/arm32/mm.h b/xen/include/asm-arm/arm32/mm.h deleted file mode 100644 index 68612499bf..0000000000 --- a/xen/include/asm-arm/arm32/mm.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ARM_ARM32_MM_H__ -#define __ARM_ARM32_MM_H__ - -/* - * Only a limited amount of RAM, called xenheap, is always mapped on ARM32. - * For convenience always return false. - */ -static inline bool arch_mfn_in_directmap(unsigned long mfn) -{ - return false; -} - -#endif /* __ARM_ARM32_MM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h deleted file mode 100644 index 715a9e4fef..0000000000 --- a/xen/include/asm-arm/arm32/page.h +++ /dev/null @@ -1,118 +0,0 @@ -#ifndef __ARM_ARM32_PAGE_H__ -#define __ARM_ARM32_PAGE_H__ - -#ifndef __ASSEMBLY__ - -/* Write a pagetable entry. 
- * - * If the table entry is changing a text mapping, it is responsibility - * of the caller to issue an ISB after write_pte. - */ -static inline void write_pte(lpae_t *p, lpae_t pte) -{ - asm volatile ( - /* Ensure any writes have completed with the old mappings. */ - "dsb;" - /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */ - "strd %0, %H0, [%1];" - "dsb;" - : : "r" (pte.bits), "r" (p) : "memory"); -} - -/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */ -#define __invalidate_dcache_one(R) STORE_CP32(R, DCIMVAC) - -/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ -#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC) - -/* Inline ASM to clean and invalidate dcache on register R (may be an - * inline asm operand) */ -#define __clean_and_invalidate_dcache_one(R) STORE_CP32(R, DCCIMVAC) - -/* - * Invalidate all instruction caches in Inner Shareable domain to PoU. - * We also need to flush the branch predictor for ARMv7 as it may be - * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). - */ -static inline void invalidate_icache(void) -{ - asm volatile ( - CMD_CP32(ICIALLUIS) /* Flush I-cache. */ - CMD_CP32(BPIALLIS) /* Flush branch predictor. */ - : : : "memory"); - - dsb(ish); /* Ensure completion of the flush I-cache */ - isb(); /* Synchronize fetched instruction stream. */ -} - -/* - * Invalidate all instruction caches on the local processor to PoU. - * We also need to flush the branch predictor for ARMv7 as it may be - * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b). - */ -static inline void invalidate_icache_local(void) -{ - asm volatile ( - CMD_CP32(ICIALLU) /* Flush I-cache. */ - CMD_CP32(BPIALL) /* Flush branch predictor. */ - : : : "memory"); - - dsb(nsh); /* Ensure completion of the flush I-cache */ - isb(); /* Synchronize fetched instruction stream. */ -} - -/* Ask the MMU to translate a VA for us */ -static inline uint64_t __va_to_par(vaddr_t va) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - WRITE_CP32(va, ATS1HR); - isb(); /* Ensure result is available. */ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} - -/* Ask the MMU to translate a Guest VA for us */ -static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - if ( (flags & GV2M_WRITE) == GV2M_WRITE ) - WRITE_CP32(va, ATS12NSOPW); - else - WRITE_CP32(va, ATS12NSOPR); - isb(); /* Ensure result is available. */ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} -static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - if ( (flags & GV2M_WRITE) == GV2M_WRITE ) - WRITE_CP32(va, ATS1CPW); - else - WRITE_CP32(va, ATS1CPR); - isb(); /* Ensure result is available. 
*/ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} - -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) - -#endif /* __ASSEMBLY__ */ - -#endif /* __ARM_ARM32_PAGE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/processor.h b/xen/include/asm-arm/arm32/processor.h deleted file mode 100644 index 4e679f3273..0000000000 --- a/xen/include/asm-arm/arm32/processor.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef __ASM_ARM_ARM32_PROCESSOR_H -#define __ASM_ARM_ARM32_PROCESSOR_H - -#define ACTLR_CAXX_SMP (1<<6) - -#ifndef __ASSEMBLY__ -/* On stack VCPU state */ -struct cpu_user_regs -{ - uint32_t r0; - uint32_t r1; - uint32_t r2; - uint32_t r3; - uint32_t r4; - uint32_t r5; - uint32_t r6; - uint32_t r7; - uint32_t r8; - uint32_t r9; - uint32_t r10; - union { - uint32_t r11; - uint32_t fp; - }; - uint32_t r12; - - uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */ - - /* r14 - LR: is the same physical register as LR_usr */ - union { - uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */ - - uint32_t lr_usr; - }; - - union { /* Return IP, pc32 is used to allow code to be common with 64-bit */ - uint32_t pc, pc32; - }; - uint32_t cpsr; /* Return mode */ - uint32_t hsr; /* Exception Syndrome */ - - /* Outer guest frame only from here on... */ - - uint32_t sp_usr; /* LR_usr is the same register as LR, see above */ - - uint32_t sp_irq, lr_irq; - uint32_t sp_svc, lr_svc; - uint32_t sp_abt, lr_abt; - uint32_t sp_und, lr_und; - - uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq; - uint32_t sp_fiq, lr_fiq; - - uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq; - - uint32_t pad1; /* Doubleword-align the user half of the frame */ -}; - -#endif - -#endif /* __ASM_ARM_ARM32_PROCESSOR_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/sysregs.h b/xen/include/asm-arm/arm32/sysregs.h deleted file mode 100644 index 6841d5de43..0000000000 --- a/xen/include/asm-arm/arm32/sysregs.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef __ASM_ARM_ARM32_SYSREGS_H -#define __ASM_ARM_ARM32_SYSREGS_H - -#include -#include - -/* Layout as used in assembly, with src/dest registers mixed in */ -#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 -#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm -#define CP32(r, name...) __CP32(r, name) -#define CP64(r, name...) __CP64(r, name) - -/* Stringified for inline assembly */ -#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";" -#define STORE_CP32(r, name...) "mcr " __stringify(CP32(%r, name)) ";" -#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";" -#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";" - -/* Issue a CP operation which takes no argument, - * uses r0 as a placeholder register. */ -#define CMD_CP32(name...) "mcr " __stringify(CP32(r0, name)) ";" - -#ifndef __ASSEMBLY__ - -/* C wrappers */ -#define READ_CP32(name...) ({ \ - register uint32_t _r; \ - asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \ - _r; }) - -#define WRITE_CP32(v, name...) do { \ - register uint32_t _r = (v); \ - asm volatile(STORE_CP32(0, name) : : "r" (_r)); \ -} while (0) - -#define READ_CP64(name...) 
({ \ - register uint64_t _r; \ - asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \ - _r; }) - -#define WRITE_CP64(v, name...) do { \ - register uint64_t _r = (v); \ - asm volatile(STORE_CP64(0, name) : : "r" (_r)); \ -} while (0) - -/* - * C wrappers for accessing system registers. - * - * Registers come in 3 types: - * - those which are always 32-bit regardless of AArch32 vs AArch64 - * (use {READ,WRITE}_SYSREG32). - * - those which are always 64-bit regardless of AArch32 vs AArch64 - * (use {READ,WRITE}_SYSREG64). - * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG). - */ -#define READ_SYSREG32(R...) READ_CP32(R) -#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R) - -#define READ_SYSREG64(R...) READ_CP64(R) -#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R) - -#define READ_SYSREG(R...) READ_SYSREG32(R) -#define WRITE_SYSREG(V, R...) WRITE_SYSREG32(V, R) - -/* MVFR2 is not defined on ARMv7 */ -#define MVFR2_MAYBE_UNDEFINED - -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_ARM_ARM32_SYSREGS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h deleted file mode 100644 index ab57abfbc5..0000000000 --- a/xen/include/asm-arm/arm32/system.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Portions taken from Linux arch arm */ -#ifndef __ASM_ARM32_SYSTEM_H -#define __ASM_ARM32_SYSTEM_H - -#include - -#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) -#define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) - -#define local_save_flags(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( "mrs %0, cpsr @ local_save_flags\n" \ - : "=r" (x) :: "memory", "cc" ); \ -}) -#define local_irq_save(x) \ -({ \ - local_save_flags(x); \ - local_irq_disable(); \ -}) -#define local_irq_restore(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (x) \ - : "memory", "cc"); \ -}) - -static inline int local_irq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_IRQ_MASK); -} - -#define local_fiq_enable() __asm__("cpsie f @ __stf\n" : : : "memory", "cc") -#define local_fiq_disable() __asm__("cpsid f @ __clf\n" : : : "memory", "cc") - -#define local_abort_enable() __asm__("cpsie a @ __sta\n" : : : "memory", "cc") -#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc") - -static inline int local_fiq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_FIQ_MASK); -} - -#define CSDB ".inst 0xe320f014" - -static inline unsigned long array_index_mask_nospec(unsigned long idx, - unsigned long sz) -{ - unsigned long mask; - - asm volatile( "cmp %1, %2\n" - "sbc %0, %1, %1\n" - CSDB - : "=r" (mask) - : "r" (idx), "Ir" (sz) - : "cc" ); - - return mask; -} -#define array_index_mask_nospec array_index_mask_nospec - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm32/traps.h b/xen/include/asm-arm/arm32/traps.h deleted file mode 100644 index e3c4a8b473..0000000000 --- a/xen/include/asm-arm/arm32/traps.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_ARM32_TRAPS__ -#define __ASM_ARM32_TRAPS__ - -#endif /* __ASM_ARM32_TRAPS__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - 
* c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ - diff --git a/xen/include/asm-arm/arm32/vfp.h b/xen/include/asm-arm/arm32/vfp.h deleted file mode 100644 index bade3bc66e..0000000000 --- a/xen/include/asm-arm/arm32/vfp.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ARM_ARM32_VFP_H -#define _ARM_ARM32_VFP_H - -#define FPEXC_EX (1u << 31) -#define FPEXC_EN (1u << 30) -#define FPEXC_FP2V (1u << 28) - -#define MVFR0_A_SIMD_MASK (0xf << 0) - - -#define FPSID_IMPLEMENTER_BIT (24) -#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT) -#define FPSID_ARCH_BIT (16) -#define FPSID_ARCH_MASK (0xf << FPSID_ARCH_BIT) -#define FPSID_PART_BIT (8) -#define FPSID_PART_MASK (0xff << FPSID_PART_BIT) -#define FPSID_VARIANT_BIT (4) -#define FPSID_VARIANT_MASK (0xf << FPSID_VARIANT_BIT) -#define FPSID_REV_BIT (0) -#define FPSID_REV_MASK (0xf << FPSID_REV_BIT) - -struct vfp_state -{ - uint64_t fpregs1[16]; /* {d0-d15} */ - uint64_t fpregs2[16]; /* {d16-d31} */ - uint32_t fpexc; - uint32_t fpscr; - /* VFP implementation specific state */ - uint32_t fpinst; - uint32_t fpinst2; -}; - -#endif /* _ARM_ARM32_VFP_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h deleted file mode 100644 index 2d42567866..0000000000 --- a/xen/include/asm-arm/arm64/atomic.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Based on arch/arm64/include/asm/atomic.h - * which in turn is - * Based on arch/arm/include/asm/atomic.h - * - * Copyright (C) 1996 Russell King. - * Copyright (C) 2002 Deep Blue Solutions Ltd. - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ARCH_ARM_ARM64_ATOMIC -#define __ARCH_ARM_ARM64_ATOMIC - -/* - * AArch64 UP and SMP safe atomic ops. We use load exclusive and - * store exclusive to ensure that these are atomic. We may loop - * to ensure that the update happens. 
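The ldxr/stxr pairs in the hunk below are AArch64 load-exclusive/store-exclusive: stxr reports failure in a status register and the cbnz retries, so every operation is a small optimistic loop. The same retry shape written portably with the GCC/Clang __atomic builtins (a sketch of the pattern, not Xen's code):

static inline void my_atomic_add(int i, int *counter)
{
    int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

    /* On failure the builtin refreshes `old` with the current value,
     * mirroring the ldxr/stxr + cbnz retry loop. */
    while (!__atomic_compare_exchange_n(counter, &old, old + i,
                                        true /* weak, may spuriously fail */,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED))
        ;
}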
- */ -static inline void atomic_add(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - asm volatile("// atomic_add\n" -"1: ldxr %w0, %2\n" -" add %w0, %w0, %w3\n" -" stxr %w1, %w0, %2\n" -" cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i)); -} - -static inline int atomic_add_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - asm volatile("// atomic_add_return\n" -"1: ldxr %w0, %2\n" -" add %w0, %w0, %w3\n" -" stlxr %w1, %w0, %2\n" -" cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "memory"); - - smp_mb(); - return result; -} - -static inline void atomic_sub(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - asm volatile("// atomic_sub\n" -"1: ldxr %w0, %2\n" -" sub %w0, %w0, %w3\n" -" stxr %w1, %w0, %2\n" -" cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i)); -} - -static inline int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - asm volatile("// atomic_sub_return\n" -"1: ldxr %w0, %2\n" -" sub %w0, %w0, %w3\n" -" stlxr %w1, %w0, %2\n" -" cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (i) - : "memory"); - - smp_mb(); - return result; -} - -static inline void atomic_and(int m, atomic_t *v) -{ - unsigned long tmp; - int result; - - asm volatile("// atomic_and\n" -"1: ldxr %w0, %2\n" -" and %w0, %w0, %w3\n" -" stxr %w1, %w0, %2\n" -" cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : "Ir" (m)); -} - -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) -{ - unsigned long tmp; - int oldval; - - smp_mb(); - - asm volatile("// atomic_cmpxchg\n" -"1: ldxr %w1, %2\n" -" cmp %w1, %w3\n" -" b.ne 2f\n" -" stxr %w0, %w4, %2\n" -" cbnz %w0, 1b\n" -"2:" - : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) - : "Ir" (old), "r" (new) - : "cc"); - - smp_mb(); - return oldval; -} - -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) - c = old; - return c; -} - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff --git a/xen/include/asm-arm/arm64/bitops.h b/xen/include/asm-arm/arm64/bitops.h deleted file mode 100644 index d85a49bca4..0000000000 --- a/xen/include/asm-arm/arm64/bitops.h +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef _ARM_ARM64_BITOPS_H -#define _ARM_ARM64_BITOPS_H - -/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */ -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static /*__*/always_inline unsigned long __ffs(unsigned long word) -{ - return __builtin_ctzl(word); -} - -/* Based on linux/include/asm-generic/bitops/ffz.h */ -/* - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. 
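A worked example of the bitops contracts spelled out above; the 0 / ~0UL guards are the caller's job, and the values are easy to check by hand:

#include <assert.h>

int main(void)
{
    unsigned long w = 0x58;    /* 0b1011000: lowest set bit is bit 3 */
    assert(w != 0);            /* __ffs() is undefined on 0 */
    assert(__builtin_ctzl(w) == 3);      /* what __ffs(w) computes */

    unsigned long x = 0x07;    /* 0b0111: lowest clear bit is bit 3 */
    assert(x != ~0UL);         /* ffz() is undefined on ~0UL */
    assert(__builtin_ctzl(~x) == 3);     /* ffz(x) == __ffs(~x) */
    return 0;
}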
- */ -#define ffz(x) __ffs(~(x)) - -static inline int flsl(unsigned long x) -{ - uint64_t ret; - - if (__builtin_constant_p(x)) - return generic_flsl(x); - - asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); - - return BITS_PER_LONG - ret; -} - -/* Based on linux/include/asm-generic/bitops/find.h */ - -#ifndef find_next_bit -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The bitmap size in bits - */ -extern unsigned long find_next_bit(const unsigned long *addr, unsigned long - size, unsigned long offset); -#endif - -#ifndef find_next_zero_bit -/** - * find_next_zero_bit - find the next cleared bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The bitmap size in bits - */ -extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned - long size, unsigned long offset); -#endif - -#ifdef CONFIG_GENERIC_FIND_FIRST_BIT - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first set bit. - */ -extern unsigned long find_first_bit(const unsigned long *addr, - unsigned long size); - -/** - * find_first_zero_bit - find the first cleared bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first cleared bit. - */ -extern unsigned long find_first_zero_bit(const unsigned long *addr, - unsigned long size); -#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ - -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) -#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) - -#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ - - -#endif /* _ARM_ARM64_BITOPS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/brk.h b/xen/include/asm-arm/arm64/brk.h deleted file mode 100644 index 04442c4b9f..0000000000 --- a/xen/include/asm-arm/arm64/brk.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2016 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
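The brk.h hunk just below places the BRK #imm16 payload at bits [20:5] of the base opcode 0xd4200000, so the two reserved immediates encode as follows (a self-contained check; the BRK_WITH_IMM name is invented for the demo):

#include <assert.h>

#define BRK_WITH_IMM(imm16) (0xd4200000u | ((unsigned int)(imm16) << 5))

int main(void)
{
    assert(BRK_WITH_IMM(1) == 0xd4200020u); /* BUG/WARN trap (BRK_BUG_FRAME_IMM) */
    assert(BRK_WITH_IMM(2) == 0xd4200040u); /* AARCH64_BREAK_FAULT */
    return 0;
}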
- */ - -#ifndef __ASM_ARM_ARM64_BRK -#define __ASM_ARM_ARM64_BRK - -/* - * #imm16 values used for BRK instruction generation - * 0x001: xen-mode BUG() and WARN() traps - * 0x002: for triggering a fault on purpose (reserved) - */ -#define BRK_BUG_FRAME_IMM 1 -#define BRK_FAULT_IMM 2 - -/* - * BRK instruction encoding - * The #imm16 value should be placed at bits[20:5] within BRK ins - */ -#define AARCH64_BREAK_MON 0xd4200000 - -/* - * BRK instruction for provoking a fault on purpose - */ -#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (BRK_FAULT_IMM << 5)) - -#endif /* !__ASM_ARM_ARM64_BRK */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/bug.h b/xen/include/asm-arm/arm64/bug.h deleted file mode 100644 index 5e11c0dfd5..0000000000 --- a/xen/include/asm-arm/arm64/bug.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ARM_ARM64_BUG_H__ -#define __ARM_ARM64_BUG_H__ - -#include -#include - -#define BUG_INSTR "brk " __stringify(BRK_BUG_FRAME_IMM) - -#define BUG_FN_REG x0 - -#endif /* __ARM_ARM64_BUG_H__ */ diff --git a/xen/include/asm-arm/arm64/cmpxchg.h b/xen/include/asm-arm/arm64/cmpxchg.h deleted file mode 100644 index 10e4edc022..0000000000 --- a/xen/include/asm-arm/arm64/cmpxchg.h +++ /dev/null @@ -1,183 +0,0 @@ -#ifndef __ASM_ARM64_CMPXCHG_H -#define __ASM_ARM64_CMPXCHG_H - -extern void __bad_xchg(volatile void *, int); - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - unsigned long ret, tmp; - - switch (size) { - case 1: - asm volatile("// __xchg1\n" - "1: ldxrb %w0, %2\n" - " stlxrb %w1, %w3, %2\n" - " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) - : "r" (x) - : "memory"); - break; - case 2: - asm volatile("// __xchg2\n" - "1: ldxrh %w0, %2\n" - " stlxrh %w1, %w3, %2\n" - " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) - : "r" (x) - : "memory"); - break; - case 4: - asm volatile("// __xchg4\n" - "1: ldxr %w0, %2\n" - " stlxr %w1, %w3, %2\n" - " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) - : "r" (x) - : "memory"); - break; - case 8: - asm volatile("// __xchg8\n" - "1: ldxr %0, %2\n" - " stlxr %w1, %3, %2\n" - " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) - : "r" (x) - : "memory"); - break; - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - - smp_mb(); - return ret; -} - -#define xchg(ptr,x) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ - __ret; \ -}) - -extern unsigned long __bad_cmpxchg(volatile void *ptr, int size); - -#define __CMPXCHG_CASE(w, sz, name) \ -static inline bool __cmpxchg_case_##name(volatile void *ptr, \ - unsigned long *old, \ - unsigned long new, \ - bool timeout, \ - unsigned int max_try) \ -{ \ - unsigned long oldval; \ - unsigned long res; \ - \ - do { \ - asm volatile("// __cmpxchg_case_" #name "\n" \ - " ldxr" #sz " %" #w "1, %2\n" \ - " mov %w0, #0\n" \ - " cmp %" #w "1, %" #w "3\n" \ - " b.ne 1f\n" \ - " stxr" #sz " %w0, %" #w "4, %2\n" \ - "1:\n" \ - : "=&r" (res), "=&r" (oldval), \ - "+Q" (*(unsigned long *)ptr) \ - : "Ir" (*old), "r" (new) \ - : "cc"); \ - \ - if (!res) \ - break; \ - } while (!timeout || ((--max_try) > 0)); \ - \ - *old = oldval; \ - \ - return !res; \ -} - -__CMPXCHG_CASE(w, b, 1) -__CMPXCHG_CASE(w, h, 2) -__CMPXCHG_CASE(w, , 4) -__CMPXCHG_CASE( , , 8) - -static always_inline bool __int_cmpxchg(volatile void *ptr, unsigned long 
*old, - unsigned long new, int size, - bool timeout, unsigned int max_try) -{ - switch (size) { - case 1: - return __cmpxchg_case_1(ptr, old, new, timeout, max_try); - case 2: - return __cmpxchg_case_2(ptr, old, new, timeout, max_try); - case 4: - return __cmpxchg_case_4(ptr, old, new, timeout, max_try); - case 8: - return __cmpxchg_case_8(ptr, old, new, timeout, max_try); - default: - return __bad_cmpxchg(ptr, size); - } - - ASSERT_UNREACHABLE(); -} - -static always_inline unsigned long __cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, - int size) -{ - smp_mb(); - if (!__int_cmpxchg(ptr, &old, new, size, false, 0)) - ASSERT_UNREACHABLE(); - smp_mb(); - - return old; -} - -/* - * The helper may fail to update the memory if the action takes too long. - * - * @old: On call the value pointed contains the expected old value. It will be - * updated to the actual old value. - * @max_try: Maximum number of iterations - * - * The helper will return true when the update has succeeded (i.e no - * timeout) and false if the update has failed. - */ -static always_inline bool __cmpxchg_timeout(volatile void *ptr, - unsigned long *old, - unsigned long new, - int size, - unsigned int max_try) -{ - bool ret; - - smp_mb(); - ret = __int_cmpxchg(ptr, old, new, size, true, max_try); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \ - sizeof(*(ptr))); \ - __ret; \ -}) - -#define cmpxchg64(ptr, o, n) cmpxchg(ptr, o, n) - -#define __cmpxchg64_timeout(ptr, old, new, max_try) \ - __cmpxchg_timeout(ptr, old, new, 8, max_try) - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff --git a/xen/include/asm-arm/arm64/cpufeature.h b/xen/include/asm-arm/arm64/cpufeature.h deleted file mode 100644 index d9b9fa77cb..0000000000 --- a/xen/include/asm-arm/arm64/cpufeature.h +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef __ASM_ARM_ARM64_CPUFEATURES_H -#define __ASM_ARM_ARM64_CPUFEATURES_H - -/* - * CPU feature register tracking - * - * The safe value of a CPUID feature field is dependent on the implications - * of the values assigned to it by the architecture. Based on the relationship - * between the values, the features are classified into 3 types - LOWER_SAFE, - * HIGHER_SAFE and EXACT. - * - * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest - * for HIGHER_SAFE. It is expected that all CPUs have the same value for - * a field when EXACT is specified, failing which, the safe value specified - * in the table is chosen. - */ - -enum ftr_type { - FTR_EXACT, /* Use a predefined safe value */ - FTR_LOWER_SAFE, /* Smaller value is safe */ - FTR_HIGHER_SAFE, /* Bigger value is safe */ - FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */ -}; - -#define FTR_STRICT true /* SANITY check strict matching required */ -#define FTR_NONSTRICT false /* SANITY check ignored */ - -#define FTR_SIGNED true /* Value should be treated as signed */ -#define FTR_UNSIGNED false /* Value should be treated as unsigned */ - -#define FTR_VISIBLE true /* Feature visible to the user space */ -#define FTR_HIDDEN false /* Feature is hidden from the user */ - -#define FTR_VISIBLE_IF_IS_ENABLED(config) \ - (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN) - -struct arm64_ftr_bits { - bool sign; /* Value is signed ? 
*/ - bool visible; - bool strict; /* CPU Sanity check: strict matching required ? */ - enum ftr_type type; - u8 shift; - u8 width; - s64 safe_val; /* safe value for FTR_EXACT features */ -}; - -static inline int __attribute_const__ -cpuid_feature_extract_signed_field_width(u64 features, int field, int width) -{ - return (s64)(features << (64 - width - field)) >> (64 - width); -} - -static inline int __attribute_const__ -cpuid_feature_extract_signed_field(u64 features, int field) -{ - return cpuid_feature_extract_signed_field_width(features, field, 4); -} - -static inline unsigned int __attribute_const__ -cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) -{ - return (u64)(features << (64 - width - field)) >> (64 - width); -} - -static inline unsigned int __attribute_const__ -cpuid_feature_extract_unsigned_field(u64 features, int field) -{ - return cpuid_feature_extract_unsigned_field_width(features, field, 4); -} - -static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) -{ - return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); -} - -static inline int __attribute_const__ -cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign) -{ - return (sign) ? - cpuid_feature_extract_signed_field_width(features, field, width) : - cpuid_feature_extract_unsigned_field_width(features, field, width); -} - -static inline int __attribute_const__ -cpuid_feature_extract_field(u64 features, int field, bool sign) -{ - return cpuid_feature_extract_field_width(features, field, 4, sign); -} - -static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) -{ - return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign); -} - -#endif /* _ASM_ARM_ARM64_CPUFEATURES_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/efibind.h b/xen/include/asm-arm/arm64/efibind.h deleted file mode 100644 index 2b0bf40bf2..0000000000 --- a/xen/include/asm-arm/arm64/efibind.h +++ /dev/null @@ -1,216 +0,0 @@ -/*++ - -Copyright (c) 1998 Intel Corporation - -Module Name: - - efefind.h - -Abstract: - - EFI to compile bindings - - - - -Revision History - ---*/ - -#ifndef __GNUC__ -#pragma pack() -#endif - -#define EFIERR(a) (0x8000000000000000 | a) -#define EFI_ERROR_MASK 0x8000000000000000 -#define EFIERR_OEM(a) (0xc000000000000000 | a) - -#define BAD_POINTER 0xFBFBFBFBFBFBFBFB -#define MAX_ADDRESS 0xFFFFFFFFFFFFFFFF - -#define EFI_STUB_ERROR MAX_ADDRESS - -#ifndef __ASSEMBLY__ -// -// Basic int types of various widths -// - -#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L ) - - // No ANSI C 1999/2000 stdint.h integer width declarations - - #if defined(__GNUC__) - typedef unsigned long long uint64_t __attribute__((aligned (8))); - typedef long long int64_t __attribute__((aligned (8))); - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #elif defined(UNIX_LP64) - - /* Use LP64 programming model from C_FLAGS for integer width declarations */ - - typedef unsigned long uint64_t; - typedef long int64_t; - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #else - - /* Assume P64 programming model from C_FLAGS for integer width declarations */ - - typedef 
unsigned long long uint64_t __attribute__((aligned (8))); - typedef long long int64_t __attribute__((aligned (8))); - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #endif -#endif - -// -// Basic EFI types of various widths -// - -#ifndef __WCHAR_TYPE__ -# define __WCHAR_TYPE__ short -#endif - -typedef uint64_t UINT64; -typedef int64_t INT64; - -#ifndef _BASETSD_H_ - typedef uint32_t UINT32; - typedef int32_t INT32; -#endif - -typedef uint16_t UINT16; -typedef int16_t INT16; -typedef uint8_t UINT8; -typedef int8_t INT8; -typedef __WCHAR_TYPE__ WCHAR; - -#undef VOID -#define VOID void - - -typedef int64_t INTN; -typedef uint64_t UINTN; - -#define POST_CODE(_Data) - - -#define BREAKPOINT() while (TRUE); // Make it hang on Bios[Dbg]32 - -// -// Pointers must be aligned to these addresses to function -// - -#define MIN_ALIGNMENT_SIZE 4 - -#define ALIGN_VARIABLE(Value ,Adjustment) \ - (UINTN)Adjustment = 0; \ - if((UINTN)Value % MIN_ALIGNMENT_SIZE) \ - (UINTN)Adjustment = MIN_ALIGNMENT_SIZE - ((UINTN)Value % MIN_ALIGNMENT_SIZE); \ - Value = (UINTN)Value + (UINTN)Adjustment - - -// -// Define macros to build data structure signatures from characters. -// - -#define EFI_SIGNATURE_16(A,B) ((A) | (B<<8)) -#define EFI_SIGNATURE_32(A,B,C,D) (EFI_SIGNATURE_16(A,B) | (EFI_SIGNATURE_16(C,D) << 16)) -#define EFI_SIGNATURE_64(A,B,C,D,E,F,G,H) (EFI_SIGNATURE_32(A,B,C,D) | ((UINT64)(EFI_SIGNATURE_32(E,F,G,H)) << 32)) - -#define EXPORTAPI - - -// -// EFIAPI - prototype calling convention for EFI function pointers -// BOOTSERVICE - prototype for implementation of a boot service interface -// RUNTIMESERVICE - prototype for implementation of a runtime service interface -// RUNTIMEFUNCTION - prototype for implementation of a runtime function that is not a service -// RUNTIME_CODE - pragma macro for declaring runtime code -// - -#ifndef EFIAPI // Forces EFI calling conventions regardless of compiler options - #define EFIAPI // Substitute expression to force C calling convention -#endif - -#define BOOTSERVICE -//#define RUNTIMESERVICE(proto,a) alloc_text("rtcode",a); proto a -//#define RUNTIMEFUNCTION(proto,a) alloc_text("rtcode",a); proto a -#define RUNTIMESERVICE -#define RUNTIMEFUNCTION - - -#define RUNTIME_CODE(a) alloc_text("rtcode", a) -#define BEGIN_RUNTIME_DATA() data_seg("rtdata") -#define END_RUNTIME_DATA() data_seg("") - -#define VOLATILE volatile - -#define MEMORY_FENCE() - - -// -// When built similar to FW, then link everything together as -// one big module. -// - -#define EFI_DRIVER_ENTRY_POINT(InitFunction) \ - UINTN \ - InitializeDriver ( \ - VOID *ImageHandle, \ - VOID *SystemTable \ - ) \ - { \ - return InitFunction(ImageHandle, \ - SystemTable); \ - } \ - \ - EFI_STATUS efi_main( \ - EFI_HANDLE image, \ - EFI_SYSTEM_TABLE *systab \ - ) __attribute__((weak, \ - alias ("InitializeDriver"))); - -#define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \ - (_if)->LoadInternal(type, name, entry) - - -// -// Some compilers don't support the forward reference construct: -// typedef struct XXXXX -// -// The following macro provides a workaround for such cases.
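To make the forward-reference workaround above concrete: under GCC, INTERFACE_DECL(x) leaves a plain `struct x` forward declaration, which is all a header needs to declare pointers to interfaces defined elsewhere (the _EFI_FOO name below is invented for illustration):

/* INTERFACE_DECL(_EFI_FOO) expands, under __GNUC__, to: */
struct _EFI_FOO;                      /* incomplete type, defined elsewhere */

struct uses_foo {
    struct _EFI_FOO *vtable;          /* pointers to incomplete types are fine */
};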
-// -#ifdef NO_INTERFACE_DECL -#define INTERFACE_DECL(x) -#else -#ifdef __GNUC__ -#define INTERFACE_DECL(x) struct x -#else -#define INTERFACE_DECL(x) typedef struct x -#endif -#endif - -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/flushtlb.h b/xen/include/asm-arm/arm64/flushtlb.h deleted file mode 100644 index 7c54315187..0000000000 --- a/xen/include/asm-arm/arm64/flushtlb.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__ -#define __ASM_ARM_ARM64_FLUSHTLB_H__ - -/* - * Every invalidation operation uses the following pattern: - * - * DSB ISHST // Ensure prior page-table updates have completed - * TLBI... // Invalidate the TLB - * DSB ISH // Ensure the TLB invalidation has completed - * ISB // See explanation below - * - * ARM64_WORKAROUND_REPEAT_TLBI: - * Modification of the translation table for a virtual address might lead to - * a read-after-read ordering violation. - * The workaround repeats the TLBI+DSB operation for all the TLB flush operations. - * While this is strictly not necessary, we don't want to take any risk. - * - * For Xen page-tables the ISB will discard any instructions fetched - * from the old mappings. - * - * For the Stage-2 page-tables the ISB ensures the completion of the DSB - * (and therefore the TLB invalidation) before continuing. So we know - * the TLBs cannot contain an entry for a mapping we may have removed. - */ -#define TLB_HELPER(name, tlbop) \ -static inline void name(void) \ -{ \ - asm volatile( \ - "dsb ishst;" \ - "tlbi " # tlbop ";" \ - ALTERNATIVE( \ - "nop; nop;", \ - "dsb ish;" \ - "tlbi " # tlbop ";", \ - ARM64_WORKAROUND_REPEAT_TLBI, \ - CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \ - "dsb ish;" \ - "isb;" \ - : : : "memory"); \ -} - -/* Flush local TLBs, current VMID only. */ -TLB_HELPER(flush_guest_tlb_local, vmalls12e1); - -/* Flush innershareable TLBs, current VMID only */ -TLB_HELPER(flush_guest_tlb, vmalls12e1is); - -/* Flush local TLBs, all VMIDs, non-hypervisor mode */ -TLB_HELPER(flush_all_guests_tlb_local, alle1); - -/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */ -TLB_HELPER(flush_all_guests_tlb, alle1is); - -/* Flush all hypervisor mappings from the TLB of the local processor. */ -TLB_HELPER(flush_xen_tlb_local, alle2); - -/* Flush TLB of local processor for address va. */ -static inline void __flush_xen_tlb_one_local(vaddr_t va) -{ - asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory"); -} - -/* Flush TLB of all processors in the inner-shareable domain for address va.
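Spelled out, TLB_HELPER(flush_guest_tlb_local, vmalls12e1) from the hunk above expands to roughly the following. This sketch shows the ALTERNATIVE() patch point with the workaround arm applied; unaffected cores get two nops patched in instead of the repeated DSB+TLBI:

static inline void flush_guest_tlb_local(void)
{
    asm volatile(
        "dsb ishst;"         /* prior page-table updates have completed */
        "tlbi vmalls12e1;"   /* invalidate stage 1 & 2 entries, current VMID */
        "dsb ish;"           /* ARM64_WORKAROUND_REPEAT_TLBI: repeat ... */
        "tlbi vmalls12e1;"   /* ... the invalidation on affected cores */
        "dsb ish;"           /* the TLB invalidation has completed */
        "isb;"               /* resynchronise the instruction stream */
        : : : "memory");
}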
*/ -static inline void __flush_xen_tlb_one(vaddr_t va) -{ - asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory"); -} - -#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/hsr.h b/xen/include/asm-arm/arm64/hsr.h deleted file mode 100644 index e691d41c17..0000000000 --- a/xen/include/asm-arm/arm64/hsr.h +++ /dev/null @@ -1,159 +0,0 @@ -#ifndef __ASM_ARM_ARM64_HSR_H -#define __ASM_ARM_ARM64_HSR_H - -/* AArch 64 System Register Encodings */ -#define __HSR_SYSREG_c0 0 -#define __HSR_SYSREG_c1 1 -#define __HSR_SYSREG_c2 2 -#define __HSR_SYSREG_c3 3 -#define __HSR_SYSREG_c4 4 -#define __HSR_SYSREG_c5 5 -#define __HSR_SYSREG_c6 6 -#define __HSR_SYSREG_c7 7 -#define __HSR_SYSREG_c8 8 -#define __HSR_SYSREG_c9 9 -#define __HSR_SYSREG_c10 10 -#define __HSR_SYSREG_c11 11 -#define __HSR_SYSREG_c12 12 -#define __HSR_SYSREG_c13 13 -#define __HSR_SYSREG_c14 14 -#define __HSR_SYSREG_c15 15 - -#define __HSR_SYSREG_0 0 -#define __HSR_SYSREG_1 1 -#define __HSR_SYSREG_2 2 -#define __HSR_SYSREG_3 3 -#define __HSR_SYSREG_4 4 -#define __HSR_SYSREG_5 5 -#define __HSR_SYSREG_6 6 -#define __HSR_SYSREG_7 7 - -/* These are used to decode traps with HSR.EC==HSR_EC_SYSREG */ -#define HSR_SYSREG(op0,op1,crn,crm,op2) \ - (((__HSR_SYSREG_##op0) << HSR_SYSREG_OP0_SHIFT) | \ - ((__HSR_SYSREG_##op1) << HSR_SYSREG_OP1_SHIFT) | \ - ((__HSR_SYSREG_##crn) << HSR_SYSREG_CRN_SHIFT) | \ - ((__HSR_SYSREG_##crm) << HSR_SYSREG_CRM_SHIFT) | \ - ((__HSR_SYSREG_##op2) << HSR_SYSREG_OP2_SHIFT)) - -#define HSR_SYSREG_DCISW HSR_SYSREG(1,0,c7,c6,2) -#define HSR_SYSREG_DCCSW HSR_SYSREG(1,0,c7,c10,2) -#define HSR_SYSREG_DCCISW HSR_SYSREG(1,0,c7,c14,2) - -#define HSR_SYSREG_MDSCR_EL1 HSR_SYSREG(2,0,c0,c2,2) -#define HSR_SYSREG_MDRAR_EL1 HSR_SYSREG(2,0,c1,c0,0) -#define HSR_SYSREG_OSLAR_EL1 HSR_SYSREG(2,0,c1,c0,4) -#define HSR_SYSREG_OSLSR_EL1 HSR_SYSREG(2,0,c1,c1,4) -#define HSR_SYSREG_OSDLR_EL1 HSR_SYSREG(2,0,c1,c3,4) -#define HSR_SYSREG_DBGPRCR_EL1 HSR_SYSREG(2,0,c1,c4,4) -#define HSR_SYSREG_MDCCSR_EL0 HSR_SYSREG(2,3,c0,c1,0) - -#define HSR_SYSREG_DBGBVRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,4) -#define HSR_SYSREG_DBGBCRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,5) -#define HSR_SYSREG_DBGWVRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,6) -#define HSR_SYSREG_DBGWCRn_EL1(n) HSR_SYSREG(2,0,c0,c##n,7) - -#define HSR_SYSREG_DBG_CASES(REG) case HSR_SYSREG_##REG##n_EL1(0): \ - case HSR_SYSREG_##REG##n_EL1(1): \ - case HSR_SYSREG_##REG##n_EL1(2): \ - case HSR_SYSREG_##REG##n_EL1(3): \ - case HSR_SYSREG_##REG##n_EL1(4): \ - case HSR_SYSREG_##REG##n_EL1(5): \ - case HSR_SYSREG_##REG##n_EL1(6): \ - case HSR_SYSREG_##REG##n_EL1(7): \ - case HSR_SYSREG_##REG##n_EL1(8): \ - case HSR_SYSREG_##REG##n_EL1(9): \ - case HSR_SYSREG_##REG##n_EL1(10): \ - case HSR_SYSREG_##REG##n_EL1(11): \ - case HSR_SYSREG_##REG##n_EL1(12): \ - case HSR_SYSREG_##REG##n_EL1(13): \ - case HSR_SYSREG_##REG##n_EL1(14): \ - case HSR_SYSREG_##REG##n_EL1(15) - -#define HSR_SYSREG_SCTLR_EL1 HSR_SYSREG(3,0,c1, c0,0) -#define HSR_SYSREG_ACTLR_EL1 HSR_SYSREG(3,0,c1, c0,1) -#define HSR_SYSREG_TTBR0_EL1 HSR_SYSREG(3,0,c2, c0,0) -#define HSR_SYSREG_TTBR1_EL1 HSR_SYSREG(3,0,c2, c0,1) -#define HSR_SYSREG_TCR_EL1 HSR_SYSREG(3,0,c2, c0,2) -#define HSR_SYSREG_AFSR0_EL1 HSR_SYSREG(3,0,c5, c1,0) -#define HSR_SYSREG_AFSR1_EL1 HSR_SYSREG(3,0,c5, c1,1) -#define HSR_SYSREG_ESR_EL1 HSR_SYSREG(3,0,c5, c2,0) -#define HSR_SYSREG_FAR_EL1 HSR_SYSREG(3,0,c6, c0,0) -#define 
HSR_SYSREG_PMINTENSET_EL1 HSR_SYSREG(3,0,c9,c14,1) -#define HSR_SYSREG_PMINTENCLR_EL1 HSR_SYSREG(3,0,c9,c14,2) -#define HSR_SYSREG_MAIR_EL1 HSR_SYSREG(3,0,c10,c2,0) -#define HSR_SYSREG_AMAIR_EL1 HSR_SYSREG(3,0,c10,c3,0) -#define HSR_SYSREG_ICC_SGI1R_EL1 HSR_SYSREG(3,0,c12,c11,5) -#define HSR_SYSREG_ICC_ASGI1R_EL1 HSR_SYSREG(3,1,c12,c11,6) -#define HSR_SYSREG_ICC_SGI0R_EL1 HSR_SYSREG(3,2,c12,c11,7) -#define HSR_SYSREG_ICC_SRE_EL1 HSR_SYSREG(3,0,c12,c12,5) -#define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1) - -#define HSR_SYSREG_PMCR_EL0 HSR_SYSREG(3,3,c9,c12,0) -#define HSR_SYSREG_PMCNTENSET_EL0 HSR_SYSREG(3,3,c9,c12,1) -#define HSR_SYSREG_PMCNTENCLR_EL0 HSR_SYSREG(3,3,c9,c12,2) -#define HSR_SYSREG_PMOVSCLR_EL0 HSR_SYSREG(3,3,c9,c12,3) -#define HSR_SYSREG_PMSWINC_EL0 HSR_SYSREG(3,3,c9,c12,4) -#define HSR_SYSREG_PMSELR_EL0 HSR_SYSREG(3,3,c9,c12,5) -#define HSR_SYSREG_PMCEID0_EL0 HSR_SYSREG(3,3,c9,c12,6) -#define HSR_SYSREG_PMCEID1_EL0 HSR_SYSREG(3,3,c9,c12,7) - -#define HSR_SYSREG_PMCCNTR_EL0 HSR_SYSREG(3,3,c9,c13,0) -#define HSR_SYSREG_PMXEVTYPER_EL0 HSR_SYSREG(3,3,c9,c13,1) -#define HSR_SYSREG_PMXEVCNTR_EL0 HSR_SYSREG(3,3,c9,c13,2) - -#define HSR_SYSREG_PMUSERENR_EL0 HSR_SYSREG(3,3,c9,c14,0) -#define HSR_SYSREG_PMOVSSET_EL0 HSR_SYSREG(3,3,c9,c14,3) - -#define HSR_SYSREG_CNTPCT_EL0 HSR_SYSREG(3,3,c14,c0,0) -#define HSR_SYSREG_CNTP_TVAL_EL0 HSR_SYSREG(3,3,c14,c2,0) -#define HSR_SYSREG_CNTP_CTL_EL0 HSR_SYSREG(3,3,c14,c2,1) -#define HSR_SYSREG_CNTP_CVAL_EL0 HSR_SYSREG(3,3,c14,c2,2) - -/* Those registers are used when HCR_EL2.TID3 is set */ -#define HSR_SYSREG_ID_PFR0_EL1 HSR_SYSREG(3,0,c0,c1,0) -#define HSR_SYSREG_ID_PFR1_EL1 HSR_SYSREG(3,0,c0,c1,1) -#define HSR_SYSREG_ID_PFR2_EL1 HSR_SYSREG(3,0,c0,c3,4) -#define HSR_SYSREG_ID_DFR0_EL1 HSR_SYSREG(3,0,c0,c1,2) -#define HSR_SYSREG_ID_DFR1_EL1 HSR_SYSREG(3,0,c0,c3,5) -#define HSR_SYSREG_ID_AFR0_EL1 HSR_SYSREG(3,0,c0,c1,3) -#define HSR_SYSREG_ID_MMFR0_EL1 HSR_SYSREG(3,0,c0,c1,4) -#define HSR_SYSREG_ID_MMFR1_EL1 HSR_SYSREG(3,0,c0,c1,5) -#define HSR_SYSREG_ID_MMFR2_EL1 HSR_SYSREG(3,0,c0,c1,6) -#define HSR_SYSREG_ID_MMFR3_EL1 HSR_SYSREG(3,0,c0,c1,7) -#define HSR_SYSREG_ID_MMFR4_EL1 HSR_SYSREG(3,0,c0,c2,6) -#define HSR_SYSREG_ID_MMFR5_EL1 HSR_SYSREG(3,0,c0,c3,6) -#define HSR_SYSREG_ID_ISAR0_EL1 HSR_SYSREG(3,0,c0,c2,0) -#define HSR_SYSREG_ID_ISAR1_EL1 HSR_SYSREG(3,0,c0,c2,1) -#define HSR_SYSREG_ID_ISAR2_EL1 HSR_SYSREG(3,0,c0,c2,2) -#define HSR_SYSREG_ID_ISAR3_EL1 HSR_SYSREG(3,0,c0,c2,3) -#define HSR_SYSREG_ID_ISAR4_EL1 HSR_SYSREG(3,0,c0,c2,4) -#define HSR_SYSREG_ID_ISAR5_EL1 HSR_SYSREG(3,0,c0,c2,5) -#define HSR_SYSREG_ID_ISAR6_EL1 HSR_SYSREG(3,0,c0,c2,7) -#define HSR_SYSREG_MVFR0_EL1 HSR_SYSREG(3,0,c0,c3,0) -#define HSR_SYSREG_MVFR1_EL1 HSR_SYSREG(3,0,c0,c3,1) -#define HSR_SYSREG_MVFR2_EL1 HSR_SYSREG(3,0,c0,c3,2) - -#define HSR_SYSREG_ID_AA64PFR0_EL1 HSR_SYSREG(3,0,c0,c4,0) -#define HSR_SYSREG_ID_AA64PFR1_EL1 HSR_SYSREG(3,0,c0,c4,1) -#define HSR_SYSREG_ID_AA64DFR0_EL1 HSR_SYSREG(3,0,c0,c5,0) -#define HSR_SYSREG_ID_AA64DFR1_EL1 HSR_SYSREG(3,0,c0,c5,1) -#define HSR_SYSREG_ID_AA64ISAR0_EL1 HSR_SYSREG(3,0,c0,c6,0) -#define HSR_SYSREG_ID_AA64ISAR1_EL1 HSR_SYSREG(3,0,c0,c6,1) -#define HSR_SYSREG_ID_AA64MMFR0_EL1 HSR_SYSREG(3,0,c0,c7,0) -#define HSR_SYSREG_ID_AA64MMFR1_EL1 HSR_SYSREG(3,0,c0,c7,1) -#define HSR_SYSREG_ID_AA64MMFR2_EL1 HSR_SYSREG(3,0,c0,c7,2) -#define HSR_SYSREG_ID_AA64AFR0_EL1 HSR_SYSREG(3,0,c0,c5,4) -#define HSR_SYSREG_ID_AA64AFR1_EL1 HSR_SYSREG(3,0,c0,c5,5) -#define HSR_SYSREG_ID_AA64ZFR0_EL1 HSR_SYSREG(3,0,c0,c4,4) - -#endif /* 
__ASM_ARM_ARM64_HSR_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h deleted file mode 100644 index 4e0d364d41..0000000000 --- a/xen/include/asm-arm/arm64/insn.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2013 Huawei Ltd. - * Author: Jiang Liu - * - * Copyright (C) 2014 Zi Shen Lim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ARCH_ARM_ARM64_INSN -#define __ARCH_ARM_ARM64_INSN - -#include -#include - -enum aarch64_insn_hint_op { - AARCH64_INSN_HINT_NOP = 0x0 << 5, - AARCH64_INSN_HINT_YIELD = 0x1 << 5, - AARCH64_INSN_HINT_WFE = 0x2 << 5, - AARCH64_INSN_HINT_WFI = 0x3 << 5, - AARCH64_INSN_HINT_SEV = 0x4 << 5, - AARCH64_INSN_HINT_SEVL = 0x5 << 5, -}; - -enum aarch64_insn_imm_type { - AARCH64_INSN_IMM_ADR, - AARCH64_INSN_IMM_26, - AARCH64_INSN_IMM_19, - AARCH64_INSN_IMM_16, - AARCH64_INSN_IMM_14, - AARCH64_INSN_IMM_12, - AARCH64_INSN_IMM_9, - AARCH64_INSN_IMM_7, - AARCH64_INSN_IMM_6, - AARCH64_INSN_IMM_S, - AARCH64_INSN_IMM_R, - AARCH64_INSN_IMM_MAX -}; - -enum aarch64_insn_branch_type { - AARCH64_INSN_BRANCH_NOLINK, - AARCH64_INSN_BRANCH_LINK, - AARCH64_INSN_BRANCH_RETURN, - AARCH64_INSN_BRANCH_COMP_ZERO, - AARCH64_INSN_BRANCH_COMP_NONZERO, -}; - -#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ -static always_inline bool aarch64_insn_is_##abbr(u32 code) \ -{ return (code & (mask)) == (val); } \ -static always_inline u32 aarch64_insn_get_##abbr##_value(void) \ -{ return (val); } - -__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) -__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) -__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) -__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) -__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) -__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) -__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) -__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) - -bool aarch64_insn_is_branch_imm(u32 insn); - -u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); -u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm); - -s32 aarch64_get_branch_offset(u32 insn); -u32 aarch64_set_branch_offset(u32 insn, s32 offset); - -u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_branch_type type); -u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); -u32 aarch64_insn_gen_nop(void); - -/* Wrapper for common code */ -static inline bool insn_is_branch_imm(u32 insn) -{ - return aarch64_insn_is_branch_imm(insn); -} - -static inline s32 insn_get_branch_offset(u32 insn) -{ - return aarch64_get_branch_offset(insn); -} - -static inline u32 insn_set_branch_offset(u32 insn, s32 offset) -{ - return aarch64_set_branch_offset(insn, offset); -} - -#endif /* !__ARCH_ARM_ARM64_INSN */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff 
--git a/xen/include/asm-arm/arm64/io.h b/xen/include/asm-arm/arm64/io.h deleted file mode 100644 index 30bfc78d9e..0000000000 --- a/xen/include/asm-arm/arm64/io.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Based on linux arch/arm64/include/asm/io.h which is in turn - * Based on arch/arm/include/asm/io.h - * - * Copyright (C) 1996-2000 Russell King - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef _ARM_ARM64_IO_H -#define _ARM_ARM64_IO_H - -#include -#include -#include - -/* - * Generic IO read/write. These perform native-endian accesses. - */ -static inline void __raw_writeb(u8 val, volatile void __iomem *addr) -{ - asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); -} - -static inline void __raw_writew(u16 val, volatile void __iomem *addr) -{ - asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); -} - -static inline void __raw_writel(u32 val, volatile void __iomem *addr) -{ - asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); -} - -static inline void __raw_writeq(u64 val, volatile void __iomem *addr) -{ - asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); -} - -static inline u8 __raw_readb(const volatile void __iomem *addr) -{ - u8 val; - asm volatile(ALTERNATIVE("ldrb %w0, [%1]", - "ldarb %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); - return val; -} - -static inline u16 __raw_readw(const volatile void __iomem *addr) -{ - u16 val; - asm volatile(ALTERNATIVE("ldrh %w0, [%1]", - "ldarh %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); - return val; -} - -static inline u32 __raw_readl(const volatile void __iomem *addr) -{ - u32 val; - asm volatile(ALTERNATIVE("ldr %w0, [%1]", - "ldar %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); - return val; -} - -static inline u64 __raw_readq(const volatile void __iomem *addr) -{ - u64 val; - asm volatile(ALTERNATIVE("ldr %0, [%1]", - "ldar %0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) - : "=r" (val) : "r" (addr)); - return val; -} - -/* IO barriers */ -#define __iormb() rmb() -#define __iowmb() wmb() - -#define mmiowb() do { } while (0) - -/* - * Relaxed I/O memory access primitives. These follow the Device memory - * ordering rules but do not guarantee any ordering relative to Normal memory - * accesses. 
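In practice the split is: use the ordered accessors when an MMIO read gates subsequent accesses to Normal memory, and the _relaxed forms for register-only traffic. A hypothetical poll-for-completion sketch (STATUS_DONE, dma_done and the mmio_read32* stand-ins are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define STATUS_DONE (1u << 0)

extern uint32_t mmio_read32(const volatile uint32_t *reg);         /* as readl() */
extern uint32_t mmio_read32_relaxed(const volatile uint32_t *reg); /* as readl_relaxed() */

static bool dma_done(const volatile uint32_t *status,
                     const volatile uint32_t *byte_count, uint32_t *nbytes)
{
    /* Ordered read: the DONE flag must be observed before the DMA'd
     * buffer contents in Normal memory can be trusted. */
    if (!(mmio_read32(status) & STATUS_DONE))
        return false;

    /* Register-to-register bookkeeping only: relaxed is sufficient. */
    *nbytes = mmio_read32_relaxed(byte_count);
    return true;
}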
- */ -#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) -#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) -#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) -#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; }) - -#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) -#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) -#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) -#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) - -/* - * I/O memory access primitives. Reads are ordered relative to any - * following Normal memory access. Writes are ordered relative to any prior - * Normal memory access. - */ -#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) -#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) -#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) -#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) - -#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) -#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) -#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) -#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) - -/* - * Emulate x86 io ports for ARM. - */ -static inline int emulate_read(u64 addr) -{ - printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr); - return 0; -} - -static inline void emulate_write(u64 addr) -{ - printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr); -} - -#define inb(c) ( emulate_read(c) ) -#define inw(c) ( emulate_read(c) ) -#define inl(c) ( emulate_read(c) ) - -#define outb(v, c) ( emulate_write(c) ) -#define outw(v, c) ( emulate_write(c) ) -#define outl(v, c) ( emulate_write(c) ) - -#endif /* _ARM_ARM64_IO_H */ diff --git a/xen/include/asm-arm/arm64/macros.h b/xen/include/asm-arm/arm64/macros.h deleted file mode 100644 index 5ad66efd6b..0000000000 --- a/xen/include/asm-arm/arm64/macros.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __ASM_ARM_ARM64_MACROS_H -#define __ASM_ARM_ARM64_MACROS_H - - /* - * @dst: Result of get_cpu_info() - */ - .macro adr_cpu_info, dst - add \dst, sp, #STACK_SIZE - and \dst, \dst, #~(STACK_SIZE - 1) - sub \dst, \dst, #CPUINFO_sizeof - .endm - - /* - * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id())) - * @sym: The name of the per-cpu variable - * @tmp: scratch register - */ - .macro ldr_this_cpu, dst, sym, tmp - ldr \dst, =per_cpu__\sym - mrs \tmp, tpidr_el2 - ldr \dst, [\dst, \tmp] - .endm - - .macro ret - /* ret opcode */ - .inst 0xd65f03c0 - sb - .endm - -/* - * Register aliases. - */ -lr .req x30 /* link register */ - -#endif /* __ASM_ARM_ARM64_MACROS_H */ - diff --git a/xen/include/asm-arm/arm64/mm.h b/xen/include/asm-arm/arm64/mm.h deleted file mode 100644 index d0a3be7e15..0000000000 --- a/xen/include/asm-arm/arm64/mm.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ARM_ARM64_MM_H__ -#define __ARM_ARM64_MM_H__ - -/* - * On ARM64, all the RAM is currently direct mapped in Xen. - * Hence return always true. 
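The arm32/arm64 arch_mfn_in_directmap() pair is what lets common code choose between plain address arithmetic and a temporary mapping. A caller-side sketch under assumed names (mfn_to_virt and map_via_fixmap here are placeholders for the port's real mapping primitives):

#include <stdbool.h>

extern bool arch_mfn_in_directmap(unsigned long mfn);
extern void *mfn_to_virt(unsigned long mfn);      /* placeholder */
extern void *map_via_fixmap(unsigned long mfn);   /* placeholder */

static void *map_frame(unsigned long mfn)
{
    if (arch_mfn_in_directmap(mfn))
        return mfn_to_virt(mfn);      /* direct map: arithmetic only */
    return map_via_fixmap(mfn);       /* otherwise create a mapping */
}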
- */ -static inline bool arch_mfn_in_directmap(unsigned long mfn) -{ - return true; -} - -#endif /* __ARM_ARM64_MM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h deleted file mode 100644 index 0cba266373..0000000000 --- a/xen/include/asm-arm/arm64/page.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef __ARM_ARM64_PAGE_H__ -#define __ARM_ARM64_PAGE_H__ - -#ifndef __ASSEMBLY__ - -#include - -/* Write a pagetable entry */ -static inline void write_pte(lpae_t *p, lpae_t pte) -{ - asm volatile ( - /* Ensure any writes have completed with the old mappings. */ - "dsb sy;" - "str %0, [%1];" /* Write the entry */ - "dsb sy;" - : : "r" (pte.bits), "r" (p) : "memory"); -} - -/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */ -#define __invalidate_dcache_one(R) "dc ivac, %" #R ";" - -/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ -#define __clean_dcache_one(R) \ - ALTERNATIVE("dc cvac, %" #R ";", \ - "dc civac, %" #R ";", \ - ARM64_WORKAROUND_CLEAN_CACHE) \ - -/* Inline ASM to clean and invalidate dcache on register R (may be an - * inline asm operand) */ -#define __clean_and_invalidate_dcache_one(R) "dc civac, %" #R ";" - -/* Invalidate all instruction caches in Inner Shareable domain to PoU */ -static inline void invalidate_icache(void) -{ - asm volatile ("ic ialluis"); - dsb(ish); /* Ensure completion of the flush I-cache */ - isb(); -} - -/* Invalidate all instruction caches on the local processor to PoU */ -static inline void invalidate_icache_local(void) -{ - asm volatile ("ic iallu"); - dsb(nsh); /* Ensure completion of the I-cache flush */ - isb(); -} - -/* Ask the MMU to translate a VA for us */ -static inline uint64_t __va_to_par(vaddr_t va) -{ - uint64_t par, tmp = READ_SYSREG64(PAR_EL1); - - asm volatile ("at s1e2r, %0;" : : "r" (va)); - isb(); - par = READ_SYSREG64(PAR_EL1); - WRITE_SYSREG64(tmp, PAR_EL1); - return par; -} - -/* Ask the MMU to translate a Guest VA for us */ -static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags) -{ - uint64_t par, tmp = READ_SYSREG64(PAR_EL1); - - if ( (flags & GV2M_WRITE) == GV2M_WRITE ) - asm volatile ("at s12e1w, %0;" : : "r" (va)); - else - asm volatile ("at s12e1r, %0;" : : "r" (va)); - isb(); - par = READ_SYSREG64(PAR_EL1); - WRITE_SYSREG64(tmp, PAR_EL1); - return par; -} - -static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags) -{ - uint64_t par, tmp = READ_SYSREG64(PAR_EL1); - - if ( (flags & GV2M_WRITE) == GV2M_WRITE ) - asm volatile ("at s1e1w, %0;" : : "r" (va)); - else - asm volatile ("at s1e1r, %0;" : : "r" (va)); - isb(); - par = READ_SYSREG64(PAR_EL1); - WRITE_SYSREG64(tmp, PAR_EL1); - return par; -} - -extern void clear_page(void *to); - -#endif /* __ASSEMBLY__ */ - -#endif /* __ARM_ARM64_PAGE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h deleted file mode 100644 index c749f80ad9..0000000000 --- a/xen/include/asm-arm/arm64/processor.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef __ASM_ARM_ARM64_PROCESSOR_H -#define __ASM_ARM_ARM64_PROCESSOR_H - -#ifndef __ASSEMBLY__ - -/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). 
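The trick in the struct below is one anonymous union per register, so the AArch32 name aliases the low half of the AArch64 one. A standalone sketch of why that works (little-endian layout assumed, as on Xen/arm):

#include <assert.h>
#include <stdint.h>

union reg { uint64_t x0; uint32_t r0; };  /* what __DECL_REG(x0, r0) builds */

int main(void)
{
    union reg r = { .x0 = 0x1122334455667788ULL };
    /* On a little-endian target the 32-bit name reads the low half. */
    assert(r.r0 == 0x55667788u);
    return 0;
}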
*/ - -#define __DECL_REG(n64, n32) union { \ - uint64_t n64; \ - uint32_t n32; \ -} - -/* On stack VCPU state */ -struct cpu_user_regs -{ - /* - * The mapping AArch64 <-> AArch32 is based on D1.20.1 in ARM DDI - * 0487A.d. - * - * AArch64 AArch32 - */ - __DECL_REG(x0, r0/*_usr*/); - __DECL_REG(x1, r1/*_usr*/); - __DECL_REG(x2, r2/*_usr*/); - __DECL_REG(x3, r3/*_usr*/); - __DECL_REG(x4, r4/*_usr*/); - __DECL_REG(x5, r5/*_usr*/); - __DECL_REG(x6, r6/*_usr*/); - __DECL_REG(x7, r7/*_usr*/); - __DECL_REG(x8, r8/*_usr*/); - __DECL_REG(x9, r9/*_usr*/); - __DECL_REG(x10, r10/*_usr*/); - __DECL_REG(x11 , r11/*_usr*/); - __DECL_REG(x12, r12/*_usr*/); - - __DECL_REG(x13, /* r13_usr */ sp_usr); - __DECL_REG(x14, /* r14_usr */ lr_usr); - - __DECL_REG(x15, /* r13_hyp */ __unused_sp_hyp); - - __DECL_REG(x16, /* r14_irq */ lr_irq); - __DECL_REG(x17, /* r13_irq */ sp_irq); - - __DECL_REG(x18, /* r14_svc */ lr_svc); - __DECL_REG(x19, /* r13_svc */ sp_svc); - - __DECL_REG(x20, /* r14_abt */ lr_abt); - __DECL_REG(x21, /* r13_abt */ sp_abt); - - __DECL_REG(x22, /* r14_und */ lr_und); - __DECL_REG(x23, /* r13_und */ sp_und); - - __DECL_REG(x24, r8_fiq); - __DECL_REG(x25, r9_fiq); - __DECL_REG(x26, r10_fiq); - __DECL_REG(x27, r11_fiq); - __DECL_REG(x28, r12_fiq); - __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq); - - __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq); - - register_t sp; /* Valid for hypervisor frames */ - - /* Return address and mode */ - __DECL_REG(pc, pc32); /* ELR_EL2 */ - uint64_t cpsr; /* SPSR_EL2 */ - uint64_t hsr; /* ESR_EL2 */ - - /* The kernel frame should be 16-byte aligned. */ - uint64_t pad0; - - /* Outer guest frame only from here on... */ - - union { - uint64_t spsr_el1; /* AArch64 */ - uint32_t spsr_svc; /* AArch32 */ - }; - - /* AArch32 guests only */ - uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; - - /* AArch64 guests only */ - uint64_t sp_el0; - uint64_t sp_el1, elr_el1; -}; - -#undef __DECL_REG - -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_ARM_ARM64_PROCESSOR_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h deleted file mode 100644 index d7e4772f21..0000000000 --- a/xen/include/asm-arm/arm64/sysregs.h +++ /dev/null @@ -1,423 +0,0 @@ -#ifndef __ASM_ARM_ARM64_SYSREGS_H -#define __ASM_ARM_ARM64_SYSREGS_H - -#include - -/* - * GIC System register assembly aliases picked from kernel - */ -#define ICC_PMR_EL1 S3_0_C4_C6_0 -#define ICC_DIR_EL1 S3_0_C12_C11_1 -#define ICC_SGI1R_EL1 S3_0_C12_C11_5 -#define ICC_EOIR1_EL1 S3_0_C12_C12_1 -#define ICC_IAR1_EL1 S3_0_C12_C12_0 -#define ICC_BPR1_EL1 S3_0_C12_C12_3 -#define ICC_CTLR_EL1 S3_0_C12_C12_4 -#define ICC_SRE_EL1 S3_0_C12_C12_5 -#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7 - -#define ICH_VSEIR_EL2 S3_4_C12_C9_4 -#define ICC_SRE_EL2 S3_4_C12_C9_5 -#define ICH_HCR_EL2 S3_4_C12_C11_0 -#define ICH_VTR_EL2 S3_4_C12_C11_1 -#define ICH_MISR_EL2 S3_4_C12_C11_2 -#define ICH_EISR_EL2 S3_4_C12_C11_3 -#define ICH_ELSR_EL2 S3_4_C12_C11_5 -#define ICH_VMCR_EL2 S3_4_C12_C11_7 - -#define __LR0_EL2(x) S3_4_C12_C12_ ## x -#define __LR8_EL2(x) S3_4_C12_C13_ ## x - -#define ICH_LR0_EL2 __LR0_EL2(0) -#define ICH_LR1_EL2 __LR0_EL2(1) -#define ICH_LR2_EL2 __LR0_EL2(2) -#define ICH_LR3_EL2 __LR0_EL2(3) -#define ICH_LR4_EL2 __LR0_EL2(4) -#define ICH_LR5_EL2 __LR0_EL2(5) -#define ICH_LR6_EL2 __LR0_EL2(6) -#define ICH_LR7_EL2 __LR0_EL2(7) -#define ICH_LR8_EL2 __LR8_EL2(0) -#define ICH_LR9_EL2 
__LR8_EL2(1) -#define ICH_LR10_EL2 __LR8_EL2(2) -#define ICH_LR11_EL2 __LR8_EL2(3) -#define ICH_LR12_EL2 __LR8_EL2(4) -#define ICH_LR13_EL2 __LR8_EL2(5) -#define ICH_LR14_EL2 __LR8_EL2(6) -#define ICH_LR15_EL2 __LR8_EL2(7) - -#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x -#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) -#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) -#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) -#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) - -#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x -#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) -#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) -#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) -#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) - -/* - * Define ID coprocessor registers if they are not - * already defined by the compiler. - * - * Values picked from linux kernel - */ -#ifndef ID_AA64MMFR2_EL1 -#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2 -#endif -#ifndef ID_PFR2_EL1 -#define ID_PFR2_EL1 S3_0_C0_C3_4 -#endif -#ifndef ID_MMFR4_EL1 -#define ID_MMFR4_EL1 S3_0_C0_C2_6 -#endif -#ifndef ID_MMFR5_EL1 -#define ID_MMFR5_EL1 S3_0_C0_C3_6 -#endif -#ifndef ID_ISAR6_EL1 -#define ID_ISAR6_EL1 S3_0_C0_C2_7 -#endif -#ifndef ID_AA64ZFR0_EL1 -#define ID_AA64ZFR0_EL1 S3_0_C0_C4_4 -#endif -#ifndef ID_DFR1_EL1 -#define ID_DFR1_EL1 S3_0_C0_C3_5 -#endif - -/* ID registers (imported from arm64/include/asm/sysreg.h in Linux) */ - -/* id_aa64isar0 */ -#define ID_AA64ISAR0_RNDR_SHIFT 60 -#define ID_AA64ISAR0_TLB_SHIFT 56 -#define ID_AA64ISAR0_TS_SHIFT 52 -#define ID_AA64ISAR0_FHM_SHIFT 48 -#define ID_AA64ISAR0_DP_SHIFT 44 -#define ID_AA64ISAR0_SM4_SHIFT 40 -#define ID_AA64ISAR0_SM3_SHIFT 36 -#define ID_AA64ISAR0_SHA3_SHIFT 32 -#define ID_AA64ISAR0_RDM_SHIFT 28 -#define ID_AA64ISAR0_ATOMICS_SHIFT 20 -#define ID_AA64ISAR0_CRC32_SHIFT 16 -#define ID_AA64ISAR0_SHA2_SHIFT 12 -#define ID_AA64ISAR0_SHA1_SHIFT 8 -#define ID_AA64ISAR0_AES_SHIFT 4 - -#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 -#define ID_AA64ISAR0_TLB_RANGE 0x2 - -/* id_aa64isar1 */ -#define ID_AA64ISAR1_I8MM_SHIFT 52 -#define ID_AA64ISAR1_DGH_SHIFT 48 -#define ID_AA64ISAR1_BF16_SHIFT 44 -#define ID_AA64ISAR1_SPECRES_SHIFT 40 -#define ID_AA64ISAR1_SB_SHIFT 36 -#define ID_AA64ISAR1_FRINTTS_SHIFT 32 -#define ID_AA64ISAR1_GPI_SHIFT 28 -#define ID_AA64ISAR1_GPA_SHIFT 24 -#define ID_AA64ISAR1_LRCPC_SHIFT 20 -#define ID_AA64ISAR1_FCMA_SHIFT 16 -#define ID_AA64ISAR1_JSCVT_SHIFT 12 -#define ID_AA64ISAR1_API_SHIFT 8 -#define ID_AA64ISAR1_APA_SHIFT 4 -#define ID_AA64ISAR1_DPB_SHIFT 0 - -#define ID_AA64ISAR1_APA_NI 0x0 -#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 -#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_API_NI 0x0 -#define ID_AA64ISAR1_API_IMP_DEF 0x1 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_GPA_NI 0x0 -#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_GPI_NI 0x0 -#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 - -/* id_aa64pfr0 */ -#define ID_AA64PFR0_CSV3_SHIFT 60 -#define ID_AA64PFR0_CSV2_SHIFT 56 -#define ID_AA64PFR0_DIT_SHIFT 48 -#define ID_AA64PFR0_AMU_SHIFT 44 -#define ID_AA64PFR0_MPAM_SHIFT 40 -#define ID_AA64PFR0_SEL2_SHIFT 36 -#define ID_AA64PFR0_SVE_SHIFT 32 -#define ID_AA64PFR0_RAS_SHIFT 28 -#define ID_AA64PFR0_GIC_SHIFT 24 -#define ID_AA64PFR0_ASIMD_SHIFT 20 -#define ID_AA64PFR0_FP_SHIFT 16 -#define ID_AA64PFR0_EL3_SHIFT 12 -#define ID_AA64PFR0_EL2_SHIFT 8 -#define 
ID_AA64PFR0_EL1_SHIFT 4 -#define ID_AA64PFR0_EL0_SHIFT 0 - -#define ID_AA64PFR0_AMU 0x1 -#define ID_AA64PFR0_SVE 0x1 -#define ID_AA64PFR0_RAS_V1 0x1 -#define ID_AA64PFR0_FP_NI 0xf -#define ID_AA64PFR0_FP_SUPPORTED 0x0 -#define ID_AA64PFR0_ASIMD_NI 0xf -#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 -#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1 -#define ID_AA64PFR0_EL1_32BIT_64BIT 0x2 -#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1 -#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2 - -/* id_aa64pfr1 */ -#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_RASFRAC_SHIFT 12 -#define ID_AA64PFR1_MTE_SHIFT 8 -#define ID_AA64PFR1_SSBS_SHIFT 4 -#define ID_AA64PFR1_BT_SHIFT 0 - -#define ID_AA64PFR1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_BT_BTI 0x1 - -#define ID_AA64PFR1_MTE_NI 0x0 -#define ID_AA64PFR1_MTE_EL0 0x1 -#define ID_AA64PFR1_MTE 0x2 - -/* id_aa64zfr0 */ -#define ID_AA64ZFR0_F64MM_SHIFT 56 -#define ID_AA64ZFR0_F32MM_SHIFT 52 -#define ID_AA64ZFR0_I8MM_SHIFT 44 -#define ID_AA64ZFR0_SM4_SHIFT 40 -#define ID_AA64ZFR0_SHA3_SHIFT 32 -#define ID_AA64ZFR0_BF16_SHIFT 20 -#define ID_AA64ZFR0_BITPERM_SHIFT 16 -#define ID_AA64ZFR0_AES_SHIFT 4 -#define ID_AA64ZFR0_SVEVER_SHIFT 0 - -#define ID_AA64ZFR0_F64MM 0x1 -#define ID_AA64ZFR0_F32MM 0x1 -#define ID_AA64ZFR0_I8MM 0x1 -#define ID_AA64ZFR0_BF16 0x1 -#define ID_AA64ZFR0_SM4 0x1 -#define ID_AA64ZFR0_SHA3 0x1 -#define ID_AA64ZFR0_BITPERM 0x1 -#define ID_AA64ZFR0_AES 0x1 -#define ID_AA64ZFR0_AES_PMULL 0x2 -#define ID_AA64ZFR0_SVEVER_SVE2 0x1 - -/* id_aa64mmfr0 */ -#define ID_AA64MMFR0_ECV_SHIFT 60 -#define ID_AA64MMFR0_FGT_SHIFT 56 -#define ID_AA64MMFR0_EXS_SHIFT 44 -#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 -#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 -#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 -#define ID_AA64MMFR0_TGRAN4_SHIFT 28 -#define ID_AA64MMFR0_TGRAN64_SHIFT 24 -#define ID_AA64MMFR0_TGRAN16_SHIFT 20 -#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 -#define ID_AA64MMFR0_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 -#define ID_AA64MMFR0_ASID_SHIFT 4 -#define ID_AA64MMFR0_PARANGE_SHIFT 0 - -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 -#define ID_AA64MMFR0_PARANGE_48 0x5 -#define ID_AA64MMFR0_PARANGE_52 0x6 - -/* id_aa64mmfr1 */ -#define ID_AA64MMFR1_ETS_SHIFT 36 -#define ID_AA64MMFR1_TWED_SHIFT 32 -#define ID_AA64MMFR1_XNX_SHIFT 28 -#define ID_AA64MMFR1_SPECSEI_SHIFT 24 -#define ID_AA64MMFR1_PAN_SHIFT 20 -#define ID_AA64MMFR1_LOR_SHIFT 16 -#define ID_AA64MMFR1_HPD_SHIFT 12 -#define ID_AA64MMFR1_VHE_SHIFT 8 -#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 -#define ID_AA64MMFR1_HADBS_SHIFT 0 - -#define ID_AA64MMFR1_VMIDBITS_8 0 -#define ID_AA64MMFR1_VMIDBITS_16 2 - -/* id_aa64mmfr2 */ -#define ID_AA64MMFR2_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EVT_SHIFT 56 -#define ID_AA64MMFR2_BBM_SHIFT 52 -#define ID_AA64MMFR2_TTL_SHIFT 48 -#define ID_AA64MMFR2_FWB_SHIFT 40 -#define ID_AA64MMFR2_IDS_SHIFT 36 -#define ID_AA64MMFR2_AT_SHIFT 32 -#define ID_AA64MMFR2_ST_SHIFT 28 -#define ID_AA64MMFR2_NV_SHIFT 24 -#define ID_AA64MMFR2_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_LVA_SHIFT 16 -#define ID_AA64MMFR2_IESB_SHIFT 12 -#define ID_AA64MMFR2_LSM_SHIFT 8 -#define ID_AA64MMFR2_UAO_SHIFT 4 -#define ID_AA64MMFR2_CNP_SHIFT 0 - -/* id_aa64dfr0 */ -#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 -#define ID_AA64DFR0_PMSVER_SHIFT 32 -#define 
ID_AA64DFR0_CTX_CMPS_SHIFT 28 -#define ID_AA64DFR0_WRPS_SHIFT 20 -#define ID_AA64DFR0_BRPS_SHIFT 12 -#define ID_AA64DFR0_PMUVER_SHIFT 8 -#define ID_AA64DFR0_TRACEVER_SHIFT 4 -#define ID_AA64DFR0_DEBUGVER_SHIFT 0 - -#define ID_AA64DFR0_PMUVER_8_0 0x1 -#define ID_AA64DFR0_PMUVER_8_1 0x4 -#define ID_AA64DFR0_PMUVER_8_4 0x5 -#define ID_AA64DFR0_PMUVER_8_5 0x6 -#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf - -#define ID_DFR0_PERFMON_SHIFT 24 - -#define ID_DFR0_PERFMON_8_1 0x4 - -#define ID_ISAR4_SWP_FRAC_SHIFT 28 -#define ID_ISAR4_PSR_M_SHIFT 24 -#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20 -#define ID_ISAR4_BARRIER_SHIFT 16 -#define ID_ISAR4_SMC_SHIFT 12 -#define ID_ISAR4_WRITEBACK_SHIFT 8 -#define ID_ISAR4_WITHSHIFTS_SHIFT 4 -#define ID_ISAR4_UNPRIV_SHIFT 0 - -#define ID_DFR1_MTPMU_SHIFT 0 - -#define ID_ISAR0_DIVIDE_SHIFT 24 -#define ID_ISAR0_DEBUG_SHIFT 20 -#define ID_ISAR0_COPROC_SHIFT 16 -#define ID_ISAR0_CMPBRANCH_SHIFT 12 -#define ID_ISAR0_BITFIELD_SHIFT 8 -#define ID_ISAR0_BITCOUNT_SHIFT 4 -#define ID_ISAR0_SWAP_SHIFT 0 - -#define ID_ISAR5_RDM_SHIFT 24 -#define ID_ISAR5_CRC32_SHIFT 16 -#define ID_ISAR5_SHA2_SHIFT 12 -#define ID_ISAR5_SHA1_SHIFT 8 -#define ID_ISAR5_AES_SHIFT 4 -#define ID_ISAR5_SEVL_SHIFT 0 - -#define ID_ISAR6_I8MM_SHIFT 24 -#define ID_ISAR6_BF16_SHIFT 20 -#define ID_ISAR6_SPECRES_SHIFT 16 -#define ID_ISAR6_SB_SHIFT 12 -#define ID_ISAR6_FHM_SHIFT 8 -#define ID_ISAR6_DP_SHIFT 4 -#define ID_ISAR6_JSCVT_SHIFT 0 - -#define ID_MMFR0_INNERSHR_SHIFT 28 -#define ID_MMFR0_FCSE_SHIFT 24 -#define ID_MMFR0_AUXREG_SHIFT 20 -#define ID_MMFR0_TCM_SHIFT 16 -#define ID_MMFR0_SHARELVL_SHIFT 12 -#define ID_MMFR0_OUTERSHR_SHIFT 8 -#define ID_MMFR0_PMSA_SHIFT 4 -#define ID_MMFR0_VMSA_SHIFT 0 - -#define ID_MMFR4_EVT_SHIFT 28 -#define ID_MMFR4_CCIDX_SHIFT 24 -#define ID_MMFR4_LSM_SHIFT 20 -#define ID_MMFR4_HPDS_SHIFT 16 -#define ID_MMFR4_CNP_SHIFT 12 -#define ID_MMFR4_XNX_SHIFT 8 -#define ID_MMFR4_AC2_SHIFT 4 -#define ID_MMFR4_SPECSEI_SHIFT 0 - -#define ID_MMFR5_ETS_SHIFT 0 - -#define ID_PFR0_DIT_SHIFT 24 -#define ID_PFR0_CSV2_SHIFT 16 -#define ID_PFR0_STATE3_SHIFT 12 -#define ID_PFR0_STATE2_SHIFT 8 -#define ID_PFR0_STATE1_SHIFT 4 -#define ID_PFR0_STATE0_SHIFT 0 - -#define ID_DFR0_PERFMON_SHIFT 24 -#define ID_DFR0_MPROFDBG_SHIFT 20 -#define ID_DFR0_MMAPTRC_SHIFT 16 -#define ID_DFR0_COPTRC_SHIFT 12 -#define ID_DFR0_MMAPDBG_SHIFT 8 -#define ID_DFR0_COPSDBG_SHIFT 4 -#define ID_DFR0_COPDBG_SHIFT 0 - -#define ID_PFR2_SSBS_SHIFT 4 -#define ID_PFR2_CSV3_SHIFT 0 - -#define MVFR0_FPROUND_SHIFT 28 -#define MVFR0_FPSHVEC_SHIFT 24 -#define MVFR0_FPSQRT_SHIFT 20 -#define MVFR0_FPDIVIDE_SHIFT 16 -#define MVFR0_FPTRAP_SHIFT 12 -#define MVFR0_FPDP_SHIFT 8 -#define MVFR0_FPSP_SHIFT 4 -#define MVFR0_SIMD_SHIFT 0 - -#define MVFR1_SIMDFMAC_SHIFT 28 -#define MVFR1_FPHP_SHIFT 24 -#define MVFR1_SIMDHP_SHIFT 20 -#define MVFR1_SIMDSP_SHIFT 16 -#define MVFR1_SIMDINT_SHIFT 12 -#define MVFR1_SIMDLS_SHIFT 8 -#define MVFR1_FPDNAN_SHIFT 4 -#define MVFR1_FPFTZ_SHIFT 0 - -#define ID_PFR1_GIC_SHIFT 28 -#define ID_PFR1_VIRT_FRAC_SHIFT 24 -#define ID_PFR1_SEC_FRAC_SHIFT 20 -#define ID_PFR1_GENTIMER_SHIFT 16 -#define ID_PFR1_VIRTUALIZATION_SHIFT 12 -#define ID_PFR1_MPROGMOD_SHIFT 8 -#define ID_PFR1_SECURITY_SHIFT 4 -#define ID_PFR1_PROGMOD_SHIFT 0 - -#define MVFR2_FPMISC_SHIFT 4 -#define MVFR2_SIMDMISC_SHIFT 0 - -#define DCZID_DZP_SHIFT 4 -#define DCZID_BS_SHIFT 0 - -/* - * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which - * are reserved by the SVE architecture for future expansion of the LEN - * field, with compatible 
semantics. - */ -#define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_SIZE 9 -#define ZCR_ELx_LEN_MASK 0x1ff - -/* Access to system registers */ - -#define WRITE_SYSREG64(v, name) do { \ - uint64_t _r = v; \ - asm volatile("msr "__stringify(name)", %0" : : "r" (_r)); \ -} while (0) -#define READ_SYSREG64(name) ({ \ - uint64_t _r; \ - asm volatile("mrs %0, "__stringify(name) : "=r" (_r)); \ - _r; }) - -#define READ_SYSREG(name) READ_SYSREG64(name) -#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name) - -#endif /* _ASM_ARM_ARM64_SYSREGS_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h deleted file mode 100644 index 2e36573ac6..0000000000 --- a/xen/include/asm-arm/arm64/system.h +++ /dev/null @@ -1,91 +0,0 @@ -/* Portions taken from Linux arch arm64 */ -#ifndef __ASM_ARM64_SYSTEM_H -#define __ASM_ARM64_SYSTEM_H - -#include - -/* Uses uimm4 as a bitmask to select the clearing of one or more of - * the DAIF exception mask bits: - * bit 3 selects the D mask, - * bit 2 the A mask, - * bit 1 the I mask and - * bit 0 the F mask. -*/ - -#define local_fiq_disable() asm volatile ( "msr daifset, #1\n" ::: "memory" ) -#define local_fiq_enable() asm volatile ( "msr daifclr, #1\n" ::: "memory" ) -#define local_irq_disable() asm volatile ( "msr daifset, #2\n" ::: "memory" ) -#define local_irq_enable() asm volatile ( "msr daifclr, #2\n" ::: "memory" ) -#define local_abort_disable() asm volatile ( "msr daifset, #4\n" ::: "memory" ) -#define local_abort_enable() asm volatile ( "msr daifclr, #4\n" ::: "memory" ) - -#define local_save_flags(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile( \ - "mrs %0, daif // local_save_flags\n" \ - : "=r" (x) \ - : \ - : "memory"); \ -}) - -#define local_irq_save(x) \ -({ \ - local_save_flags(x); \ - local_irq_disable(); \ -}) -#define local_irq_restore(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( \ - "msr daif, %0 // local_irq_restore" \ - : \ - : "r" (x) \ - : "memory"); \ -}) - -static inline int local_irq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_IRQ_MASK); -} - -static inline int local_fiq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_FIQ_MASK); -} - -#define csdb() asm volatile ( "hint #20" : : : "memory" ) - -/* - * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz - * and 0 otherwise. 
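A minimal sketch of how the DAIF helpers above compose into a critical section (the example_* name is hypothetical):

static inline void example_critical_section(void)
{
    unsigned long flags;

    local_irq_save(flags);    /* save DAIF, then mask IRQs (msr daifset, #2) */
    /* ... work that must not race with interrupt handlers ... */
    local_irq_restore(flags); /* write the saved DAIF value back */
}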
- */ -static inline unsigned long array_index_mask_nospec(unsigned long idx, - unsigned long sz) -{ - unsigned long mask; - - asm volatile ( "cmp %1, %2\n" - "sbc %0, xzr, xzr\n" - : "=r" (mask) - : "r" (idx), "Ir" (sz) - : "cc" ); - csdb(); - - return mask; -} -#define array_index_mask_nospec array_index_mask_nospec - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/arm64/traps.h b/xen/include/asm-arm/arm64/traps.h deleted file mode 100644 index 2379b578cb..0000000000 --- a/xen/include/asm-arm/arm64/traps.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef __ASM_ARM64_TRAPS__ -#define __ASM_ARM64_TRAPS__ - -void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len); - -void do_sysreg(struct cpu_user_regs *regs, - const union hsr hsr); - -#endif /* __ASM_ARM64_TRAPS__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ - diff --git a/xen/include/asm-arm/arm64/vfp.h b/xen/include/asm-arm/arm64/vfp.h deleted file mode 100644 index e6e8c363bc..0000000000 --- a/xen/include/asm-arm/arm64/vfp.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _ARM_ARM64_VFP_H -#define _ARM_ARM64_VFP_H - -/* ARM64 VFP instructions require the fpregs address to be 128-bit aligned */ -#define __vfp_aligned __attribute__((aligned(16))) - -struct vfp_state -{ - uint64_t fpregs[64] __vfp_aligned; - register_t fpcr; - register_t fpexc32_el2; - register_t fpsr; -}; - -#endif /* _ARM_ARM64_VFP_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/asm_defns.h b/xen/include/asm-arm/asm_defns.h deleted file mode 100644 index 29a9dbb002..0000000000 --- a/xen/include/asm-arm/asm_defns.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef __ARM_ASM_DEFNS_H__ -#define __ARM_ASM_DEFNS_H__ - -#ifndef COMPILE_OFFSETS -/* NB. Auto-generated from arch/.../asm-offsets.c */ -#include -#endif -#include - -/* Macros for generic assembly code */ -#if defined(CONFIG_ARM_32) -# define __OP32 -# define ASM_REG(index) asm("r" # index) -#elif defined(CONFIG_ARM_64) -# define __OP32 "w" -/* - * Clang < 8.0 doesn't support register allocation using the syntax rN. - * See https://reviews.llvm.org/rL328829. - */ -# define ASM_REG(index) asm("x" # index) -#else -# error "unknown ARM variant" -#endif - -#define RODATA_STR(label, msg) \ -.pushsection .rodata.str, "aMS", %progbits, 1 ; \ -label: .asciz msg; \ -.popsection - -#define ASM_INT(label, val) \ - .p2align 2; \ -label: .long (val); \ - .size label, .
- label; \ - .type label, %object - -#endif /* __ARM_ASM_DEFNS_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h deleted file mode 100644 index ac2798d095..0000000000 --- a/xen/include/asm-arm/atomic.h +++ /dev/null @@ -1,236 +0,0 @@ -#ifndef __ARCH_ARM_ATOMIC__ -#define __ARCH_ARM_ATOMIC__ - -#include -#include -#include - -#define build_atomic_read(name, size, width, type) \ -static inline type name(const volatile type *addr) \ -{ \ - type ret; \ - asm volatile("ldr" size " %" width(0) ",%1" \ - : "=r" (ret) \ - : "m" (*addr)); \ - return ret; \ -} - -#define build_atomic_write(name, size, width, type) \ -static inline void name(volatile type *addr, type val) \ -{ \ - asm volatile("str" size " %" width(1) ",%0" \ - : "=m" (*addr) \ - : "r" (val)); \ -} - -#define build_add_sized(name, size, width, type) \ -static inline void name(volatile type *addr, type val) \ -{ \ - type t; \ - asm volatile("ldr" size " %" width(1) ",%0\n" \ - "add %" width(1) ",%" width(1) ",%" width(2) "\n" \ - "str" size " %" width(1) ",%0" \ - : "+m" (*addr), "=&r" (t) \ - : "ri" (val)); \ -} - -#if defined (CONFIG_ARM_32) -#define BYTE(n) #n -#define WORD(n) #n -#define DWORD(n) "" #n ",%H" #n -#define PAIR "d" -#elif defined (CONFIG_ARM_64) -#define BYTE(n) "w" #n -#define WORD(n) "w" #n -#define DWORD(n) "" #n -#define PAIR "" -#endif - -build_atomic_read(read_u8_atomic, "b", BYTE, uint8_t) -build_atomic_read(read_u16_atomic, "h", WORD, uint16_t) -build_atomic_read(read_u32_atomic, "", WORD, uint32_t) -build_atomic_read(read_u64_atomic, PAIR, DWORD, uint64_t) -build_atomic_read(read_int_atomic, "", WORD, int) - -build_atomic_write(write_u8_atomic, "b", BYTE, uint8_t) -build_atomic_write(write_u16_atomic, "h", WORD, uint16_t) -build_atomic_write(write_u32_atomic, "", WORD, uint32_t) -build_atomic_write(write_u64_atomic, PAIR, DWORD, uint64_t) -build_atomic_write(write_int_atomic, "", WORD, int) - -build_add_sized(add_u8_sized, "b", BYTE, uint8_t) -build_add_sized(add_u16_sized, "h", WORD, uint16_t) -build_add_sized(add_u32_sized, "", WORD, uint32_t) - -#undef BYTE -#undef WORD -#undef DWORD -#undef PAIR - -#undef build_atomic_read -#undef build_atomic_write -#undef build_add_sized - -void __bad_atomic_read(const volatile void *p, void *res); -void __bad_atomic_size(void); - -static always_inline void read_atomic_size(const volatile void *p, - void *res, - unsigned int size) -{ - switch ( size ) - { - case 1: - *(uint8_t *)res = read_u8_atomic(p); - break; - case 2: - *(uint16_t *)res = read_u16_atomic(p); - break; - case 4: - *(uint32_t *)res = read_u32_atomic(p); - break; - case 8: - *(uint64_t *)res = read_u64_atomic(p); - break; - default: - __bad_atomic_read(p, res); - break; - } -} - -static always_inline void write_atomic_size(volatile void *p, - void *val, - unsigned int size) -{ - switch ( size ) - { - case 1: - write_u8_atomic(p, *(uint8_t *)val); - break; - case 2: - write_u16_atomic(p, *(uint16_t *)val); - break; - case 4: - write_u32_atomic(p, *(uint32_t *)val); - break; - case 8: - write_u64_atomic(p, *(uint64_t *)val); - break; - default: - __bad_atomic_size(); - break; - } -} - -#define read_atomic(p) ({ \ - union { typeof(*p) val; char c[0]; } x_; \ - read_atomic_size(p, x_.c, sizeof(*p)); \ - x_.val; \ -}) - -#define write_atomic(p, x) \ - do { \ - typeof(*p) x_ = (x); \ - write_atomic_size(p, &x_, sizeof(*p)); \ - } while ( false ) - -#define 
add_sized(p, x) ({ \ - typeof(*(p)) __x = (x); \ - switch ( sizeof(*(p)) ) \ - { \ - case 1: add_u8_sized((uint8_t *)(p), __x); break; \ - case 2: add_u16_sized((uint16_t *)(p), __x); break; \ - case 4: add_u32_sized((uint32_t *)(p), __x); break; \ - default: __bad_atomic_size(); break; \ - } \ -}) - -/* - * On ARM, ordinary assignment (str instruction) doesn't clear the local - * strex/ldrex monitor on some implementations. The reason we can use it for - * atomic_set() is the clrex or dummy strex done on every exception return. - */ -static inline int atomic_read(const atomic_t *v) -{ - return *(volatile int *)&v->counter; -} - -static inline int _atomic_read(atomic_t v) -{ - return v.counter; -} - -static inline void atomic_set(atomic_t *v, int i) -{ - v->counter = i; -} - -static inline void _atomic_set(atomic_t *v, int i) -{ - v->counter = i; -} - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -static inline int atomic_sub_and_test(int i, atomic_t *v) -{ - return atomic_sub_return(i, v) == 0; -} - -static inline void atomic_inc(atomic_t *v) -{ - atomic_add(1, v); -} - -static inline int atomic_inc_return(atomic_t *v) -{ - return atomic_add_return(1, v); -} - -static inline int atomic_inc_and_test(atomic_t *v) -{ - return atomic_add_return(1, v) == 0; -} - -static inline void atomic_dec(atomic_t *v) -{ - atomic_sub(1, v); -} - -static inline int atomic_dec_return(atomic_t *v) -{ - return atomic_sub_return(1, v); -} - -static inline int atomic_dec_and_test(atomic_t *v) -{ - return atomic_sub_return(1, v) == 0; -} - -static inline int atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} - -static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - return __atomic_add_unless(v, a, u); -} - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -#endif /* __ARCH_ARM_ATOMIC__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h deleted file mode 100644 index 71ae14cab3..0000000000 --- a/xen/include/asm-arm/bitops.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright 1995, Russell King. - * Various bits and pieces copyrights include: - * Linus Torvalds (test_bit). - * Big endian support: Copyright 2001, Nicolas Pitre - * reworked by rmk. - */ - -#ifndef _ARM_BITOPS_H -#define _ARM_BITOPS_H - -#include - -/* - * Non-atomic bit manipulation. - * - * Implemented using atomics to be interrupt safe. Could alternatively - * implement with local interrupt masking. - */ -#define __set_bit(n,p) set_bit(n,p) -#define __clear_bit(n,p) clear_bit(n,p) - -#define BITOP_BITS_PER_WORD 32 -#define BITOP_MASK(nr) (1UL << ((nr) % BITOP_BITS_PER_WORD)) -#define BITOP_WORD(nr) ((nr) / BITOP_BITS_PER_WORD) -#define BITS_PER_BYTE 8 - -#define ADDR (*(volatile int *) addr) -#define CONST_ADDR (*(const volatile int *) addr) - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -/* - * Atomic bitops - * - * The helpers below *should* only be used on memory shared between - * trusted threads or we know the memory cannot be accessed by another - * thread. 
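A minimal sketch of the single-copy-atomic accessors defined above (the struct and field names are hypothetical):

struct example_shared {
    uint32_t seq;
};

static inline void example_bump(struct example_shared *s)
{
    uint32_t cur = read_atomic(&s->seq); /* one ldr: no load tearing */

    write_atomic(&s->seq, cur + 1);      /* one str: no store tearing */
}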
- */ - -void set_bit(int nr, volatile void *p); -void clear_bit(int nr, volatile void *p); -void change_bit(int nr, volatile void *p); -int test_and_set_bit(int nr, volatile void *p); -int test_and_clear_bit(int nr, volatile void *p); -int test_and_change_bit(int nr, volatile void *p); - -void clear_mask16(uint16_t mask, volatile void *p); - -/* - * The helpers below may fail to update the memory if the action takes - * too long. - * - * @max_try: Maximum number of iterations - * - * The helpers will return true when the update has succeeded (i.e no - * timeout) and false if the update has failed. - */ -bool set_bit_timeout(int nr, volatile void *p, unsigned int max_try); -bool clear_bit_timeout(int nr, volatile void *p, unsigned int max_try); -bool change_bit_timeout(int nr, volatile void *p, unsigned int max_try); -bool test_and_set_bit_timeout(int nr, volatile void *p, - int *oldbit, unsigned int max_try); -bool test_and_clear_bit_timeout(int nr, volatile void *p, - int *oldbit, unsigned int max_try); -bool test_and_change_bit_timeout(int nr, volatile void *p, - int *oldbit, unsigned int max_try); -bool clear_mask16_timeout(uint16_t mask, volatile void *p, - unsigned int max_try); - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile void *addr) -{ - unsigned int mask = BITOP_MASK(nr); - volatile unsigned int *p = - ((volatile unsigned int *)addr) + BITOP_WORD(nr); - unsigned int old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile void *addr) -{ - unsigned int mask = BITOP_MASK(nr); - volatile unsigned int *p = - ((volatile unsigned int *)addr) + BITOP_WORD(nr); - unsigned int old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, - volatile void *addr) -{ - unsigned int mask = BITOP_MASK(nr); - volatile unsigned int *p = - ((volatile unsigned int *)addr) + BITOP_WORD(nr); - unsigned int old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(int nr, const volatile void *addr) -{ - const volatile unsigned int *p = (const volatile unsigned int *)addr; - return 1UL & (p[BITOP_WORD(nr)] >> (nr & (BITOP_BITS_PER_WORD-1))); -} - -/* - * On ARMv5 and above those functions can be implemented around - * the clz instruction for much better code efficiency. 
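As the comments above require, a lock must cover the non-atomic __-prefixed helpers; a minimal sketch assuming Xen's usual spinlock helpers (the lock and bitmap names are hypothetical):

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_bitmap[2];

/* Claim slot @nr; returns true if it was previously free. */
static bool example_claim_slot(int nr)
{
    bool was_set;

    spin_lock(&example_lock);
    was_set = __test_and_set_bit(nr, example_bitmap); /* plain read-modify-write */
    spin_unlock(&example_lock);

    return !was_set;
}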
- */ - -static inline int fls(unsigned int x) -{ - int ret; - - if (__builtin_constant_p(x)) - return generic_fls(x); - - asm("clz\t%"__OP32"0, %"__OP32"1" : "=r" (ret) : "r" (x)); - return 32 - ret; -} - - -#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); }) -#define ffsl(x) ({ unsigned long __t = (x); flsl(__t & -__t); }) - -/** - * find_first_set_bit - find the first set bit in @word - * @word: the word to search - * - * Returns the bit-number of the first set bit (first bit being 0). - * The input must *not* be zero. - */ -static inline unsigned int find_first_set_bit(unsigned long word) -{ - return ffsl(word) - 1; -} - -/** - * hweightN - returns the Hamming weight of an N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. - */ -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -#endif /* _ARM_BITOPS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/bug.h b/xen/include/asm-arm/bug.h deleted file mode 100644 index f4088d0913..0000000000 --- a/xen/include/asm-arm/bug.h +++ /dev/null @@ -1,106 +0,0 @@ -#ifndef __ARM_BUG_H__ -#define __ARM_BUG_H__ - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -#define BUG_DISP_WIDTH 24 -#define BUG_LINE_LO_WIDTH (31 - BUG_DISP_WIDTH) -#define BUG_LINE_HI_WIDTH (31 - BUG_DISP_WIDTH) - -struct bug_frame { - signed int loc_disp; /* Relative address to the bug address */ - signed int file_disp; /* Relative address to the filename */ - signed int msg_disp; /* Relative address to the predicate (for ASSERT) */ - uint16_t line; /* Line number */ - uint32_t pad0:16; /* Padding for 8-byte alignment */ -}; - -#define bug_loc(b) ((const void *)(b) + (b)->loc_disp) -#define bug_file(b) ((const void *)(b) + (b)->file_disp) -#define bug_line(b) ((b)->line) -#define bug_msg(b) ((const char *)(b) + (b)->msg_disp) - -#define BUGFRAME_run_fn 0 -#define BUGFRAME_warn 1 -#define BUGFRAME_bug 2 -#define BUGFRAME_assert 3 - -#define BUGFRAME_NR 4 - -/* Many versions of GCC don't support the asm %c parameter, which would - * be preferable to this unpleasantness. We use mergeable string - * sections to avoid multiple copies of the string appearing in the - * Xen image. BUGFRAME_run_fn needs to be handled separately. - */ -#define BUG_FRAME(type, line, file, has_msg, msg) do { \ - BUILD_BUG_ON((line) >> 16); \ - BUILD_BUG_ON((type) >= BUGFRAME_NR); \ - asm ("1:"BUG_INSTR"\n" \ - ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ - "2:\t.asciz " __stringify(file) "\n" \ - "3:\n" \ - ".if " #has_msg "\n" \ - "\t.asciz " #msg "\n" \ - ".endif\n" \ - ".popsection\n" \ - ".pushsection .bug_frames." __stringify(type) ", \"a\", %progbits\n"\ - "4:\n" \ - ".p2align 2\n" \ - ".long (1b - 4b)\n" \ - ".long (2b - 4b)\n" \ - ".long (3b - 4b)\n" \ - ".hword " __stringify(line) ", 0\n" \ - ".popsection"); \ -} while (0) - -/* - * GCC will not allow "i" to be used when PIE is enabled (Xen doesn't set the - * flag but instead relies on the compiler's default). So the - * easiest way to implement run_in_exception_handler() is to pass the - * function to be called in a fixed register.
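A few worked values for the bit-scan helpers above (sketch only, assuming Xen's usual ASSERT(); the function name is hypothetical):

static void example_bit_scan_sanity(void)
{
    ASSERT(fls(1) == 1);                      /* only bit 0 set */
    ASSERT(fls(0x80000000U) == 32);           /* only bit 31 set */
    ASSERT(ffs(0x18) == 4);                   /* lowest set bit is bit 3 */
    ASSERT(find_first_set_bit(0x18UL) == 3);  /* same bit, but zero-based */
}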
- */ -#define run_in_exception_handler(fn) do { \ - asm ("mov " __stringify(BUG_FN_REG) ", %0\n" \ - "1:"BUG_INSTR"\n" \ - ".pushsection .bug_frames." __stringify(BUGFRAME_run_fn) "," \ - " \"a\", %%progbits\n" \ - "2:\n" \ - ".p2align 2\n" \ - ".long (1b - 2b)\n" \ - ".long 0, 0, 0\n" \ - ".popsection" :: "r" (fn) : __stringify(BUG_FN_REG) ); \ -} while (0) - -#define WARN() BUG_FRAME(BUGFRAME_warn, __LINE__, __FILE__, 0, "") - -#define BUG() do { \ - BUG_FRAME(BUGFRAME_bug, __LINE__, __FILE__, 0, ""); \ - unreachable(); \ -} while (0) - -#define assert_failed(msg) do { \ - BUG_FRAME(BUGFRAME_assert, __LINE__, __FILE__, 1, msg); \ - unreachable(); \ -} while (0) - -extern const struct bug_frame __start_bug_frames[], - __stop_bug_frames_0[], - __stop_bug_frames_1[], - __stop_bug_frames_2[], - __stop_bug_frames_3[]; - -#endif /* __ARM_BUG_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/byteorder.h b/xen/include/asm-arm/byteorder.h deleted file mode 100644 index 9c712c4788..0000000000 --- a/xen/include/asm-arm/byteorder.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_ARM_BYTEORDER_H__ -#define __ASM_ARM_BYTEORDER_H__ - -#define __BYTEORDER_HAS_U64__ - -#include - -#endif /* __ASM_ARM_BYTEORDER_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/cache.h b/xen/include/asm-arm/cache.h deleted file mode 100644 index 240b6ae0ea..0000000000 --- a/xen/include/asm-arm/cache.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __ARCH_ARM_CACHE_H -#define __ARCH_ARM_CACHE_H - - -/* L1 cache line size */ -#define L1_CACHE_SHIFT (CONFIG_ARM_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - -#define __read_mostly __section(".data.read_mostly") - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/cadence-uart.h b/xen/include/asm-arm/cadence-uart.h deleted file mode 100644 index 48680eea4b..0000000000 --- a/xen/include/asm-arm/cadence-uart.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * xen/include/asm-arm/cadence-uart.h - * - * Written by Edgar E. Iglesias - * Copyright (C) 2015 Xilinx Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
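A sketch of how the section markers above delimit the per-type bug-frame tables (illustrative lookup only; the in-tree consumer is do_bug_frame() in traps.c):

static const struct bug_frame *example_find_run_fn_frame(const void *pc)
{
    const struct bug_frame *b;

    /* BUGFRAME_run_fn entries live between the first two section markers. */
    for ( b = __start_bug_frames; b < __stop_bug_frames_0; b++ )
        if ( bug_loc(b) == pc )
            return b;

    return NULL;
}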
- */ - -#ifndef __ASM_ARM_CADENCE_UART_H__ -#define __ASM_ARM_CADENCE_UART_H__ - -#define R_UART_CR 0x00 -#define UART_CR_RX_RST 0x01 -#define UART_CR_TX_RST 0x02 -#define UART_CR_RX_ENABLE 0x04 -#define UART_CR_RX_DISABLE 0x08 -#define UART_CR_TX_ENABLE 0x10 -#define UART_CR_TX_DISABLE 0x20 - -#define R_UART_MR 0x04 -#define UART_MR_NO_PARITY 0x20 - -#define R_UART_IER 0x08 -#define R_UART_IDR 0x0C -#define R_UART_IMR 0x10 -#define R_UART_CISR 0x14 -#define R_UART_RTRIG 0x20 -#define R_UART_SR 0x2C -#define UART_SR_INTR_RTRIG 0x01 -#define UART_SR_INTR_REMPTY 0x02 -#define UART_SR_INTR_TEMPTY 0x08 -#define UART_SR_INTR_TFUL 0x10 - -#define R_UART_TX 0x30 -#define R_UART_RX 0x30 - -#endif /* __ASM_ARM_CADENCE_UART_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h deleted file mode 100644 index c7b7791201..0000000000 --- a/xen/include/asm-arm/config.h +++ /dev/null @@ -1,207 +0,0 @@ -/****************************************************************************** - * config.h - * - * A Linux-style configuration list. - */ - -#ifndef __ARM_CONFIG_H__ -#define __ARM_CONFIG_H__ - -#if defined(CONFIG_ARM_64) -# define LONG_BYTEORDER 3 -# define ELFSIZE 64 -#else -# define LONG_BYTEORDER 2 -# define ELFSIZE 32 -#endif - -#define BYTES_PER_LONG (1 << LONG_BYTEORDER) -#define BITS_PER_LONG (BYTES_PER_LONG << 3) -#define POINTER_ALIGN BYTES_PER_LONG - -#define BITS_PER_LLONG 64 - -/* xen_ulong_t is always 64 bits */ -#define BITS_PER_XEN_ULONG 64 - -#define CONFIG_PAGING_LEVELS 3 - -#define CONFIG_ARM 1 - -#define CONFIG_ARM_L1_CACHE_SHIFT 7 /* XXX */ - -#define CONFIG_SMP 1 - -#define CONFIG_IRQ_HAS_MULTIPLE_ACTION 1 - -#define CONFIG_PAGEALLOC_MAX_ORDER 18 -#define CONFIG_DOMU_MAX_ORDER 9 -#define CONFIG_HWDOM_MAX_ORDER 10 - -#define OPT_CONSOLE_STR "dtuart" - -#ifdef CONFIG_ARM_64 -#define MAX_VIRT_CPUS 128u -#else -#define MAX_VIRT_CPUS 8u -#endif - -#define INVALID_VCPU_ID MAX_VIRT_CPUS - -#define __LINUX_ARM_ARCH__ 7 -#define CONFIG_AEABI - -/* Linkage for ARM */ -#ifdef __ASSEMBLY__ -#define ALIGN .align 2 -#define ENTRY(name) \ - .globl name; \ - ALIGN; \ - name: -#define GLOBAL(name) \ - .globl name; \ - name: -#define END(name) \ - .size name, .-name -#define ENDPROC(name) \ - .type name, %function; \ - END(name) -#endif - -#include -#include - -/* - * Common ARM32 and ARM64 layout: - * 0 - 2M Unmapped - * 2M - 4M Xen text, data, bss - * 4M - 6M Fixmap: special-purpose 4K mapping slots - * 6M - 10M Early boot mapping of FDT - * 10M - 12M Early relocation address (used when relocating Xen) - * and later for livepatch vmap (if compiled in) - * - * ARM32 layout: - * 0 - 12M - * - * 32M - 128M Frametable: 24 bytes per page for 16GB of RAM - * 256M - 1G VMAP: ioremap and early_ioremap use this virtual address - * space - * - * 1G - 2G Xenheap: always-mapped memory - * 2G - 4G Domheap: on-demand-mapped - * - * ARM64 layout: - * 0x0000000000000000 - 0x0000007fffffffff (512GB, L0 slot [0]) - * 0 - 12M - * - * 1G - 2G VMAP: ioremap and early_ioremap - * - * 32G - 64G Frametable: 24 bytes per page for 5.3TB of RAM - * - * 0x0000008000000000 - 0x00007fffffffffff (127.5TB, L0 slots [1..255]) - * Unused - * - * 0x0000800000000000 - 0x000084ffffffffff (5TB, L0 slots [256..265]) - * 1:1 mapping of RAM - * - * 0x0000850000000000 - 0x0000ffffffffffff (123TB, L0 slots [266..511]) - * Unused - */ - -#define XEN_VIRT_START _AT(vaddr_t,0x00200000) -#define 
FIXMAP_ADDR(n) (_AT(vaddr_t,0x00400000) + (n) * PAGE_SIZE) - -#define BOOT_FDT_VIRT_START _AT(vaddr_t,0x00600000) -#define BOOT_FDT_SLOT_SIZE MB(4) -#define BOOT_FDT_VIRT_END (BOOT_FDT_VIRT_START + BOOT_FDT_SLOT_SIZE) - -#define BOOT_RELOC_VIRT_START _AT(vaddr_t,0x00a00000) -#ifdef CONFIG_LIVEPATCH -#define LIVEPATCH_VMAP_START _AT(vaddr_t,0x00a00000) -#define LIVEPATCH_VMAP_END (LIVEPATCH_VMAP_START + MB(2)) -#endif - -#define HYPERVISOR_VIRT_START XEN_VIRT_START - -#ifdef CONFIG_ARM_32 - -#define CONFIG_DOMAIN_PAGE 1 -#define CONFIG_SEPARATE_XENHEAP 1 - -#define FRAMETABLE_VIRT_START _AT(vaddr_t,0x02000000) -#define FRAMETABLE_SIZE MB(128-32) -#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) -#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1) - -#define VMAP_VIRT_START _AT(vaddr_t,0x10000000) - -#define XENHEAP_VIRT_START _AT(vaddr_t,0x40000000) -#define XENHEAP_VIRT_END _AT(vaddr_t,0x7fffffff) -#define DOMHEAP_VIRT_START _AT(vaddr_t,0x80000000) -#define DOMHEAP_VIRT_END _AT(vaddr_t,0xffffffff) - -#define VMAP_VIRT_END XENHEAP_VIRT_START - -#define DOMHEAP_ENTRIES 1024 /* 1024 2MB mapping slots */ - -/* Number of domheap pagetable pages required at the second level (2MB mappings) */ -#define DOMHEAP_SECOND_PAGES ((DOMHEAP_VIRT_END - DOMHEAP_VIRT_START + 1) >> FIRST_SHIFT) - -#else /* ARM_64 */ - -#define SLOT0_ENTRY_BITS 39 -#define SLOT0(slot) (_AT(vaddr_t,slot) << SLOT0_ENTRY_BITS) -#define SLOT0_ENTRY_SIZE SLOT0(1) - -#define VMAP_VIRT_START GB(1) -#define VMAP_VIRT_END (VMAP_VIRT_START + GB(1)) - -#define FRAMETABLE_VIRT_START GB(32) -#define FRAMETABLE_SIZE GB(32) -#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) -#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1) - -#define DIRECTMAP_VIRT_START SLOT0(256) -#define DIRECTMAP_SIZE (SLOT0_ENTRY_SIZE * (265-256)) -#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1) - -#define XENHEAP_VIRT_START xenheap_virt_start - -#define HYPERVISOR_VIRT_END DIRECTMAP_VIRT_END - -#endif - -/* Fixmap slots */ -#define FIXMAP_CONSOLE 0 /* The primary UART */ -#define FIXMAP_MISC 1 /* Ephemeral mappings of hardware */ -#define FIXMAP_ACPI_BEGIN 2 /* Start mappings of ACPI tables */ -#define FIXMAP_ACPI_END (FIXMAP_ACPI_BEGIN + NUM_FIXMAP_ACPI_PAGES - 1) /* End mappings of ACPI tables */ - -#define NR_hypercalls 64 - -#define STACK_ORDER 3 -#define STACK_SIZE (PAGE_SIZE << STACK_ORDER) - -#ifndef __ASSEMBLY__ -extern unsigned long xen_phys_start; -extern unsigned long xenheap_phys_end; -extern unsigned long frametable_virt_end; -#endif - -#define watchdog_disable() ((void)0) -#define watchdog_enable() ((void)0) - -#if defined(__ASSEMBLY__) && !defined(__LINKER__) -#include -#include -#endif - -#endif /* __ARM_CONFIG_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h deleted file mode 100644 index 6daf2b1a30..0000000000 --- a/xen/include/asm-arm/cpregs.h +++ /dev/null @@ -1,375 +0,0 @@ -#ifndef __ASM_ARM_CPREGS_H -#define __ASM_ARM_CPREGS_H - -/* - * AArch32 Co-processor registers. - * - * Note that AArch64 requires many of these definitions in order to - * support 32-bit guests. 
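A compile-time sanity sketch for the layout constants above (assuming Xen's BUILD_BUG_ON() and the 4K PAGE_SIZE used on Arm; the function name is hypothetical):

static void example_layout_checks(void)
{
    /* Consecutive fixmap slots are one page apart inside the 4M-6M window. */
    BUILD_BUG_ON(FIXMAP_ADDR(FIXMAP_MISC) !=
                 FIXMAP_ADDR(FIXMAP_CONSOLE) + PAGE_SIZE);

    /* STACK_ORDER 3 means eight pages (32KB with 4KB pages) per stack. */
    BUILD_BUG_ON(STACK_SIZE != (8 * PAGE_SIZE));
}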
- */ - -#define __HSR_CPREG_c0 0 -#define __HSR_CPREG_c1 1 -#define __HSR_CPREG_c2 2 -#define __HSR_CPREG_c3 3 -#define __HSR_CPREG_c4 4 -#define __HSR_CPREG_c5 5 -#define __HSR_CPREG_c6 6 -#define __HSR_CPREG_c7 7 -#define __HSR_CPREG_c8 8 -#define __HSR_CPREG_c9 9 -#define __HSR_CPREG_c10 10 -#define __HSR_CPREG_c11 11 -#define __HSR_CPREG_c12 12 -#define __HSR_CPREG_c13 13 -#define __HSR_CPREG_c14 14 -#define __HSR_CPREG_c15 15 - -#define __HSR_CPREG_0 0 -#define __HSR_CPREG_1 1 -#define __HSR_CPREG_2 2 -#define __HSR_CPREG_3 3 -#define __HSR_CPREG_4 4 -#define __HSR_CPREG_5 5 -#define __HSR_CPREG_6 6 -#define __HSR_CPREG_7 7 - -#define _HSR_CPREG32(cp,op1,crn,crm,op2) \ - ((__HSR_CPREG_##crn) << HSR_CP32_CRN_SHIFT) | \ - ((__HSR_CPREG_##crm) << HSR_CP32_CRM_SHIFT) | \ - ((__HSR_CPREG_##op1) << HSR_CP32_OP1_SHIFT) | \ - ((__HSR_CPREG_##op2) << HSR_CP32_OP2_SHIFT) - -#define _HSR_CPREG64(cp,op1,crm) \ - ((__HSR_CPREG_##crm) << HSR_CP64_CRM_SHIFT) | \ - ((__HSR_CPREG_##op1) << HSR_CP64_OP1_SHIFT) - -/* Encode a register as per HSR ISS pattern */ -#define HSR_CPREG32(X...) _HSR_CPREG32(X) -#define HSR_CPREG64(X...) _HSR_CPREG64(X) - -/* - * Order registers by Coprocessor-> CRn-> Opcode 1-> CRm-> Opcode 2 - * - * This matches the ordering used in the ARM as well as the groupings - * which the CP registers are allocated in. - * - * This is slightly different to the form of the instruction - * arguments, which are cp,opc1,crn,crm,opc2. - */ - -/* Coprocessor 10 */ - -#define FPSID p10,7,c0,c0,0 /* Floating-Point System ID Register */ -#define FPSCR p10,7,c1,c0,0 /* Floating-Point Status and Control Register */ -#define MVFR0 p10,7,c7,c0,0 /* Media and VFP Feature Register 0 */ -#define MVFR1 p10,7,c6,c0,0 /* Media and VFP Feature Register 1 */ -#define MVFR2 p10,7,c5,c0,0 /* Media and VFP Feature Register 2 */ -#define FPEXC p10,7,c8,c0,0 /* Floating-Point Exception Control Register */ -#define FPINST p10,7,c9,c0,0 /* Floating-Point Instruction Register */ -#define FPINST2 p10,7,c10,c0,0 /* Floating-point Instruction Register 2 */ - -/* Coprocessor 14 */ - -/* CP14 0: Debug Register interface */ -#define DBGDIDR p14,0,c0,c0,0 /* Debug ID Register */ -#define DBGDSCRINT p14,0,c0,c1,0 /* Debug Status and Control Internal */ -#define DBGDSCREXT p14,0,c0,c2,2 /* Debug Status and Control External */ -#define DBGVCR p14,0,c0,c7,0 /* Vector Catch */ -#define DBGBVR0 p14,0,c0,c0,4 /* Breakpoint Value 0 */ -#define DBGBCR0 p14,0,c0,c0,5 /* Breakpoint Control 0 */ -#define DBGWVR0 p14,0,c0,c0,6 /* Watchpoint Value 0 */ -#define DBGWCR0 p14,0,c0,c0,7 /* Watchpoint Control 0 */ -#define DBGBVR1 p14,0,c0,c1,4 /* Breakpoint Value 1 */ -#define DBGBCR1 p14,0,c0,c1,5 /* Breakpoint Control 1 */ -#define DBGOSLAR p14,0,c1,c0,4 /* OS Lock Access */ -#define DBGOSLSR p14,0,c1,c1,4 /* OS Lock Status Register */ -#define DBGOSDLR p14,0,c1,c3,4 /* OS Double Lock */ -#define DBGPRCR p14,0,c1,c4,4 /* Debug Power Control Register */ - -/* CP14 CR0: */ -#define TEECR p14,6,c0,c0,0 /* ThumbEE Configuration Register */ - -/* CP14 CR1: */ -#define DBGDRAR64 p14,0,c1 /* Debug ROM Address Register (64-bit access) */ -#define DBGDRAR p14,0,c1,c0,0 /* Debug ROM Address Register (32-bit access) */ -#define TEEHBR p14,6,c1,c0,0 /* ThumbEE Handler Base Register */ -#define JOSCR p14,7,c1,c0,0 /* Jazelle OS Control Register */ - -/* CP14 CR2: */ -#define DBGDSAR64 p14,0,c2 /* Debug Self Address Offset Register (64-bit access) */ -#define DBGDSAR p14,0,c2,c0,0 /* Debug Self Address Offset Register (32-bit access) */ -#define JMCR 
p14,7,c2,c0,0 /* Jazelle Main Configuration Register */ - - -/* Coprocessor 15 */ - -/* CP15 CR0: CPUID and Cache Type Registers */ -#define MIDR p15,0,c0,c0,0 /* Main ID Register */ -#define CTR p15,0,c0,c0,1 /* Cache Type Register */ -#define MPIDR p15,0,c0,c0,5 /* Multiprocessor Affinity Register */ -#define ID_PFR0 p15,0,c0,c1,0 /* Processor Feature Register 0 */ -#define ID_PFR1 p15,0,c0,c1,1 /* Processor Feature Register 1 */ -#define ID_PFR2 p15,0,c0,c3,4 /* Processor Feature Register 2 */ -#define ID_DFR0 p15,0,c0,c1,2 /* Debug Feature Register 0 */ -#define ID_DFR1 p15,0,c0,c3,5 /* Debug Feature Register 1 */ -#define ID_AFR0 p15,0,c0,c1,3 /* Auxiliary Feature Register 0 */ -#define ID_MMFR0 p15,0,c0,c1,4 /* Memory Model Feature Register 0 */ -#define ID_MMFR1 p15,0,c0,c1,5 /* Memory Model Feature Register 1 */ -#define ID_MMFR2 p15,0,c0,c1,6 /* Memory Model Feature Register 2 */ -#define ID_MMFR3 p15,0,c0,c1,7 /* Memory Model Feature Register 3 */ -#define ID_MMFR4 p15,0,c0,c2,6 /* Memory Model Feature Register 4 */ -#define ID_MMFR5 p15,0,c0,c3,6 /* Memory Model Feature Register 5 */ -#define ID_ISAR0 p15,0,c0,c2,0 /* ISA Feature Register 0 */ -#define ID_ISAR1 p15,0,c0,c2,1 /* ISA Feature Register 1 */ -#define ID_ISAR2 p15,0,c0,c2,2 /* ISA Feature Register 2 */ -#define ID_ISAR3 p15,0,c0,c2,3 /* ISA Feature Register 3 */ -#define ID_ISAR4 p15,0,c0,c2,4 /* ISA Feature Register 4 */ -#define ID_ISAR5 p15,0,c0,c2,5 /* ISA Feature Register 5 */ -#define ID_ISAR6 p15,0,c0,c2,7 /* ISA Feature Register 6 */ -#define CCSIDR p15,1,c0,c0,0 /* Cache Size ID Registers */ -#define CLIDR p15,1,c0,c0,1 /* Cache Level ID Register */ -#define CSSELR p15,2,c0,c0,0 /* Cache Size Selection Register */ -#define VPIDR p15,4,c0,c0,0 /* Virtualization Processor ID Register */ -#define VMPIDR p15,4,c0,c0,5 /* Virtualization Multiprocessor ID Register */ - -/* CP15 CR1: System Control Registers */ -#define SCTLR p15,0,c1,c0,0 /* System Control Register */ -#define ACTLR p15,0,c1,c0,1 /* Auxiliary Control Register */ -#define CPACR p15,0,c1,c0,2 /* Coprocessor Access Control Register */ -#define SCR p15,0,c1,c1,0 /* Secure Configuration Register */ -#define NSACR p15,0,c1,c1,2 /* Non-Secure Access Control Register */ -#define HSCTLR p15,4,c1,c0,0 /* Hyp. System Control Register */ -#define HCR p15,4,c1,c1,0 /* Hyp. Configuration Register */ -#define HDCR p15,4,c1,c1,1 /* Hyp. Debug Configuration Register */ -#define HCPTR p15,4,c1,c1,2 /* Hyp. Coprocessor Trap Register */ -#define HSTR p15,4,c1,c1,3 /* Hyp. System Trap Register */ - -/* CP15 CR2: Translation Table Base and Control Registers */ -#define TTBCR p15,0,c2,c0,2 /* Translation Table Base Control Register */ -#define TTBCR2 p15,0,c2,c0,3 /* Translation Table Base Control Register 2 */ -#define TTBR0 p15,0,c2 /* Translation Table Base Reg. 0 */ -#define TTBR1 p15,1,c2 /* Translation Table Base Reg. 1 */ -#define HTTBR p15,4,c2 /* Hyp. Translation Table Base Register */ -#define TTBR0_32 p15,0,c2,c0,0 /* 32-bit access to TTBR0 */ -#define TTBR1_32 p15,0,c2,c0,1 /* 32-bit access to TTBR1 */ -#define HTCR p15,4,c2,c0,2 /* Hyp. 
Translation Control Register */ -#define VTCR p15,4,c2,c1,2 /* Virtualization Translation Control Register */ -#define VTTBR p15,6,c2 /* Virtualization Translation Table Base Register */ - -/* CP15 CR3: Domain Access Control Register */ -#define DACR p15,0,c3,c0,0 /* Domain Access Control Register */ - -/* CP15 CR4: */ - -/* CP15 CR5: Fault Status Registers */ -#define DFSR p15,0,c5,c0,0 /* Data Fault Status Register */ -#define IFSR p15,0,c5,c0,1 /* Instruction Fault Status Register */ -#define ADFSR p15,0,c5,c1,0 /* Auxiliary Data Fault Status Register */ -#define AIFSR p15,0,c5,c1,1 /* Auxiliary Instruction Fault Status Register */ -#define HSR p15,4,c5,c2,0 /* Hyp. Syndrome Register */ - -/* CP15 CR6: Fault Address Registers */ -#define DFAR p15,0,c6,c0,0 /* Data Fault Address Register */ -#define IFAR p15,0,c6,c0,2 /* Instruction Fault Address Register */ -#define HDFAR p15,4,c6,c0,0 /* Hyp. Data Fault Address Register */ -#define HIFAR p15,4,c6,c0,2 /* Hyp. Instruction Fault Address Register */ -#define HPFAR p15,4,c6,c0,4 /* Hyp. IPA Fault Address Register */ - -/* CP15 CR7: Cache and address translation operations */ -#define PAR p15,0,c7 /* Physical Address Register */ - -#define ICIALLUIS p15,0,c7,c1,0 /* Invalidate all instruction caches to PoU inner shareable */ -#define BPIALLIS p15,0,c7,c1,6 /* Invalidate entire branch predictor array inner shareable */ -#define ICIALLU p15,0,c7,c5,0 /* Invalidate all instruction caches to PoU */ -#define ICIMVAU p15,0,c7,c5,1 /* Invalidate instruction caches by MVA to PoU */ -#define BPIALL p15,0,c7,c5,6 /* Invalidate entire branch predictor array */ -#define BPIMVA p15,0,c7,c5,7 /* Invalidate MVA from branch predictor array */ -#define DCIMVAC p15,0,c7,c6,1 /* Invalidate data cache line by MVA to PoC */ -#define DCISW p15,0,c7,c6,2 /* Invalidate data cache line by set/way */ -#define ATS1CPR p15,0,c7,c8,0 /* Address Translation Stage 1. Non-Secure Kernel Read */ -#define ATS1CPW p15,0,c7,c8,1 /* Address Translation Stage 1. Non-Secure Kernel Write */ -#define ATS1CUR p15,0,c7,c8,2 /* Address Translation Stage 1. Non-Secure User Read */ -#define ATS1CUW p15,0,c7,c8,3 /* Address Translation Stage 1. Non-Secure User Write */ -#define ATS12NSOPR p15,0,c7,c8,4 /* Address Translation Stage 1+2 Non-Secure Kernel Read */ -#define ATS12NSOPW p15,0,c7,c8,5 /* Address Translation Stage 1+2 Non-Secure Kernel Write */ -#define ATS12NSOUR p15,0,c7,c8,6 /* Address Translation Stage 1+2 Non-Secure User Read */ -#define ATS12NSOUW p15,0,c7,c8,7 /* Address Translation Stage 1+2 Non-Secure User Write */ -#define DCCMVAC p15,0,c7,c10,1 /* Clean data or unified cache line by MVA to PoC */ -#define DCCSW p15,0,c7,c10,2 /* Clean data cache line by set/way */ -#define DCCMVAU p15,0,c7,c11,1 /* Clean data cache line by MVA to PoU */ -#define DCCIMVAC p15,0,c7,c14,1 /* Data cache clean and invalidate by MVA */ -#define DCCISW p15,0,c7,c14,2 /* Clean and invalidate data cache line by set/way */ -#define ATS1HR p15,4,c7,c8,0 /* Address Translation Stage 1 Hyp. Read */ -#define ATS1HW p15,4,c7,c8,1 /* Address Translation Stage 1 Hyp. 
Write */ - -/* CP15 CR8: TLB maintenance operations */ -#define TLBIALLIS p15,0,c8,c3,0 /* Invalidate entire TLB inner shareable */ -#define TLBIMVAIS p15,0,c8,c3,1 /* Invalidate unified TLB entry by MVA inner shareable */ -#define TLBIASIDIS p15,0,c8,c3,2 /* Invalidate unified TLB by ASID match inner shareable */ -#define TLBIMVAAIS p15,0,c8,c3,3 /* Invalidate unified TLB entry by MVA all ASID inner shareable */ -#define ITLBIALL p15,0,c8,c5,0 /* Invalidate instruction TLB */ -#define ITLBIMVA p15,0,c8,c5,1 /* Invalidate instruction TLB entry by MVA */ -#define ITLBIASID p15,0,c8,c5,2 /* Invalidate instruction TLB by ASID match */ -#define DTLBIALL p15,0,c8,c6,0 /* Invalidate data TLB */ -#define DTLBIMVA p15,0,c8,c6,1 /* Invalidate data TLB entry by MVA */ -#define DTLBIASID p15,0,c8,c6,2 /* Invalidate data TLB by ASID match */ -#define TLBIALL p15,0,c8,c7,0 /* invalidate unified TLB */ -#define TLBIMVA p15,0,c8,c7,1 /* invalidate unified TLB entry by MVA */ -#define TLBIASID p15,0,c8,c7,2 /* invalidate unified TLB by ASID match */ -#define TLBIMVAA p15,0,c8,c7,3 /* invalidate unified TLB entries by MVA all ASID */ -#define TLBIALLHIS p15,4,c8,c3,0 /* Invalidate Entire Hyp. Unified TLB inner shareable */ -#define TLBIMVAHIS p15,4,c8,c3,1 /* Invalidate Unified Hyp. TLB by MVA inner shareable */ -#define TLBIALLNSNHIS p15,4,c8,c3,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB inner shareable */ -#define TLBIALLH p15,4,c8,c7,0 /* Invalidate Entire Hyp. Unified TLB */ -#define TLBIMVAH p15,4,c8,c7,1 /* Invalidate Unified Hyp. TLB by MVA */ -#define TLBIALLNSNH p15,4,c8,c7,4 /* Invalidate Entire Non-Secure Non-Hyp. Unified TLB */ - -/* CP15 CR9: Performance monitors */ -#define PMCR p15,0,c9,c12,0 /* Perf. Mon. Control Register */ -#define PMCNTENSET p15,0,c9,c12,1 /* Perf. Mon. Count Enable Set register */ -#define PMCNTENCLR p15,0,c9,c12,2 /* Perf. Mon. Count Enable Clear register */ -#define PMOVSR p15,0,c9,c12,3 /* Perf. Mon. Overflow Flag Status Register */ -#define PMSWINC p15,0,c9,c12,4 /* Perf. Mon. Software Increment register */ -#define PMSELR p15,0,c9,c12,5 /* Perf. Mon. Event Counter Selection Register */ -#define PMCEID0 p15,0,c9,c12,6 /* Perf. Mon. Common Event Identification register 0 */ -#define PMCEID1 p15,0,c9,c12,7 /* Perf. Mon. Common Event Identification register 1 */ -#define PMCCNTR p15,0,c9,c13,0 /* Perf. Mon. Cycle Count Register */ -#define PMXEVTYPER p15,0,c9,c13,1 /* Perf. Mon. Event Type Select Register */ -#define PMXEVCNTR p15,0,c9,c13,2 /* Perf. Mon. Event Count Register */ -#define PMUSERENR p15,0,c9,c14,0 /* Perf. Mon. User Enable Register */ -#define PMINTENSET p15,0,c9,c14,1 /* Perf. Mon. Interrupt Enable Set Register */ -#define PMINTENCLR p15,0,c9,c14,2 /* Perf. Mon. Interrupt Enable Clear Register */ -#define PMOVSSET p15,0,c9,c14,3 /* Perf. Mon. Overflow Flag Status Set register */ - -/* CP15 CR10: */ -#define MAIR0 p15,0,c10,c2,0 /* Memory Attribute Indirection Register 0 AKA PRRR */ -#define MAIR1 p15,0,c10,c2,1 /* Memory Attribute Indirection Register 1 AKA NMRR */ -#define HMAIR0 p15,4,c10,c2,0 /* Hyp. Memory Attribute Indirection Register 0 */ -#define HMAIR1 p15,4,c10,c2,1 /* Hyp. Memory Attribute Indirection Register 1 */ -#define AMAIR0 p15,0,c10,c3,0 /* Aux. Memory Attribute Indirection Register 0 */ -#define AMAIR1 p15,0,c10,c3,1 /* Aux.
Memory Attribute Indirection Register 1 */ - -/* CP15 CR11: DMA Operations for TCM Access */ - -/* CP15 CR12: */ -#define ICC_SGI1R p15,0,c12 /* Interrupt Controller SGI Group 1 */ -#define ICC_ASGI1R p15,1,c12 /* Interrupt Controller Alias SGI Group 1 Register */ -#define ICC_SGI0R p15,2,c12 /* Interrupt Controller SGI Group 0 */ -#define VBAR p15,0,c12,c0,0 /* Vector Base Address Register */ -#define HVBAR p15,4,c12,c0,0 /* Hyp. Vector Base Address Register */ - -/* CP15 CR13: */ -#define FCSEIDR p15,0,c13,c0,0 /* FCSE Process ID Register */ -#define CONTEXTIDR p15,0,c13,c0,1 /* Context ID Register */ -#define TPIDRURW p15,0,c13,c0,2 /* Software Thread ID, User, R/W */ -#define TPIDRURO p15,0,c13,c0,3 /* Software Thread ID, User, R/O */ -#define TPIDRPRW p15,0,c13,c0,4 /* Software Thread ID, Privileged */ -#define HTPIDR p15,4,c13,c0,2 /* Hyp. Software Thread ID Register */ - -/* CP15 CR14: */ -#define CNTPCT p15,0,c14 /* Time counter value */ -#define CNTFRQ p15,0,c14,c0,0 /* Time counter frequency */ -#define CNTKCTL p15,0,c14,c1,0 /* Time counter kernel control */ -#define CNTP_TVAL p15,0,c14,c2,0 /* Physical Timer value */ -#define CNTP_CTL p15,0,c14,c2,1 /* Physical Timer control register */ -#define CNTVCT p15,1,c14 /* Time counter value + offset */ -#define CNTP_CVAL p15,2,c14 /* Physical Timer comparator */ -#define CNTV_CVAL p15,3,c14 /* Virt. Timer comparator */ -#define CNTVOFF p15,4,c14 /* Time counter offset */ -#define CNTHCTL p15,4,c14,c1,0 /* Time counter hyp. control */ -#define CNTHP_TVAL p15,4,c14,c2,0 /* Hyp. Timer value */ -#define CNTHP_CTL p15,4,c14,c2,1 /* Hyp. Timer control register */ -#define CNTV_TVAL p15,0,c14,c3,0 /* Virt. Timer value */ -#define CNTV_CTL p15,0,c14,c3,1 /* Virt. Timer control register */ -#define CNTHP_CVAL p15,6,c14 /* Hyp. Timer comparator */ - -/* CP15 CR15: Implementation Defined Registers */ - -/* Aliases of AArch64 names for use in common code when building for AArch32 */ -#ifdef CONFIG_ARM_32 -/* Alphabetically...
*/ -#define ACTLR_EL1 ACTLR -#define AFSR0_EL1 ADFSR -#define AFSR1_EL1 AIFSR -#define CCSIDR_EL1 CCSIDR -#define CLIDR_EL1 CLIDR -#define CNTFRQ_EL0 CNTFRQ -#define CNTHCTL_EL2 CNTHCTL -#define CNTHP_CTL_EL2 CNTHP_CTL -#define CNTHP_CVAL_EL2 CNTHP_CVAL -#define CNTKCTL_EL1 CNTKCTL -#define CNTPCT_EL0 CNTPCT -#define CNTP_CTL_EL0 CNTP_CTL -#define CNTP_CVAL_EL0 CNTP_CVAL -#define CNTVCT_EL0 CNTVCT -#define CNTVOFF_EL2 CNTVOFF -#define CNTV_CTL_EL0 CNTV_CTL -#define CNTV_CVAL_EL0 CNTV_CVAL -#define CONTEXTIDR_EL1 CONTEXTIDR -#define CPACR_EL1 CPACR -#define CPTR_EL2 HCPTR -#define CSSELR_EL1 CSSELR -#define CTR_EL0 CTR -#define DACR32_EL2 DACR -#define ESR_EL1 DFSR -#define ESR_EL2 HSR -#define HCR_EL2 HCR -#define HPFAR_EL2 HPFAR -#define HSTR_EL2 HSTR -#define ID_AFR0_EL1 ID_AFR0 -#define ID_DFR0_EL1 ID_DFR0 -#define ID_DFR1_EL1 ID_DFR1 -#define ID_ISAR0_EL1 ID_ISAR0 -#define ID_ISAR1_EL1 ID_ISAR1 -#define ID_ISAR2_EL1 ID_ISAR2 -#define ID_ISAR3_EL1 ID_ISAR3 -#define ID_ISAR4_EL1 ID_ISAR4 -#define ID_ISAR5_EL1 ID_ISAR5 -#define ID_ISAR6_EL1 ID_ISAR6 -#define ID_MMFR0_EL1 ID_MMFR0 -#define ID_MMFR1_EL1 ID_MMFR1 -#define ID_MMFR2_EL1 ID_MMFR2 -#define ID_MMFR3_EL1 ID_MMFR3 -#define ID_MMFR4_EL1 ID_MMFR4 -#define ID_MMFR5_EL1 ID_MMFR5 -#define ID_PFR0_EL1 ID_PFR0 -#define ID_PFR1_EL1 ID_PFR1 -#define ID_PFR2_EL1 ID_PFR2 -#define IFSR32_EL2 IFSR -#define MDCR_EL2 HDCR -#define MIDR_EL1 MIDR -#define MPIDR_EL1 MPIDR -#define PAR_EL1 PAR -#define SCTLR_EL1 SCTLR -#define SCTLR_EL2 HSCTLR -#define TCR_EL1 TTBCR -#define TEECR32_EL1 TEECR -#define TEEHBR32_EL1 TEEHBR -#define TPIDRRO_EL0 TPIDRURO -#define TPIDR_EL0 TPIDRURW -#define TPIDR_EL1 TPIDRPRW -#define TPIDR_EL2 HTPIDR -#define TTBR0_EL1 TTBR0 -#define TTBR0_EL2 HTTBR -#define TTBR1_EL1 TTBR1 -#define VBAR_EL1 VBAR -#define VBAR_EL2 HVBAR -#define VMPIDR_EL2 VMPIDR -#define VPIDR_EL2 VPIDR -#define VTCR_EL2 VTCR -#define VTTBR_EL2 VTTBR -#define MVFR0_EL1 MVFR0 -#define MVFR1_EL1 MVFR1 -#define MVFR2_EL1 MVFR2 -#endif - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/cpuerrata.h b/xen/include/asm-arm/cpuerrata.h deleted file mode 100644 index 8d7e7b9375..0000000000 --- a/xen/include/asm-arm/cpuerrata.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __ARM_CPUERRATA_H__ -#define __ARM_CPUERRATA_H__ - -#include -#include - -void check_local_cpu_errata(void); -void enable_errata_workarounds(void); - -#define CHECK_WORKAROUND_HELPER(erratum, feature, arch) \ -static inline bool check_workaround_##erratum(void) \ -{ \ - if ( !IS_ENABLED(arch) ) \ - return false; \ - else \ - { \ - register_t ret; \ - \ - asm volatile (ALTERNATIVE("mov %0, #0", \ - "mov %0, #1", \ - feature) \ - : "=r" (ret)); \ - \ - return unlikely(ret); \ - } \ -} - -CHECK_WORKAROUND_HELPER(766422, ARM32_WORKAROUND_766422, CONFIG_ARM_32) -CHECK_WORKAROUND_HELPER(834220, ARM64_WORKAROUND_834220, CONFIG_ARM_64) -CHECK_WORKAROUND_HELPER(ssbd, ARM_SSBD, CONFIG_ARM_SSBD) -CHECK_WORKAROUND_HELPER(858921, ARM_WORKAROUND_858921, - CONFIG_ARM_ERRATUM_858921) - -#undef CHECK_WORKAROUND_HELPER - -enum ssbd_state -{ - ARM_SSBD_UNKNOWN, - ARM_SSBD_FORCE_DISABLE, - ARM_SSBD_RUNTIME, - ARM_SSBD_FORCE_ENABLE, - ARM_SSBD_MITIGATED, -}; - -#ifdef CONFIG_ARM_SSBD - -#include - -extern enum ssbd_state ssbd_state; - -static inline enum ssbd_state get_ssbd_state(void) -{ - return ssbd_state; -} - -DECLARE_PER_CPU(register_t, ssbd_callback_required); - -static inline bool 
cpu_require_ssbd_mitigation(void) -{ - return this_cpu(ssbd_callback_required); -} - -#else - -static inline bool cpu_require_ssbd_mitigation(void) -{ - return false; -} - -static inline enum ssbd_state get_ssbd_state(void) -{ - return ARM_SSBD_UNKNOWN; -} - -#endif - -#endif /* __ARM_CPUERRATA_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h deleted file mode 100644 index 8a5afbaf0b..0000000000 --- a/xen/include/asm-arm/cpufeature.h +++ /dev/null @@ -1,428 +0,0 @@ -#ifndef __ASM_ARM_CPUFEATURE_H -#define __ASM_ARM_CPUFEATURE_H - -#ifdef CONFIG_ARM_64 -#define cpu_feature64(c, feat) ((c)->pfr64.feat) -#define boot_cpu_feature64(feat) (system_cpuinfo.pfr64.feat) -#define boot_dbg_feature64(feat) (system_cpuinfo.dbg64.feat) - -#define cpu_feature64_has_el0_32(c) (cpu_feature64(c, el0) == 2) - -#define cpu_has_el0_32 (boot_cpu_feature64(el0) == 2) -#define cpu_has_el0_64 (boot_cpu_feature64(el0) >= 1) -#define cpu_has_el1_32 (boot_cpu_feature64(el1) == 2) -#define cpu_has_el1_64 (boot_cpu_feature64(el1) >= 1) -#define cpu_has_el2_32 (boot_cpu_feature64(el2) == 2) -#define cpu_has_el2_64 (boot_cpu_feature64(el2) >= 1) -#define cpu_has_el3_32 (boot_cpu_feature64(el3) == 2) -#define cpu_has_el3_64 (boot_cpu_feature64(el3) >= 1) -#define cpu_has_fp (boot_cpu_feature64(fp) < 8) -#define cpu_has_simd (boot_cpu_feature64(simd) < 8) -#define cpu_has_gicv3 (boot_cpu_feature64(gic) >= 1) -#endif - -#define cpu_feature32(c, feat) ((c)->pfr32.feat) -#define boot_cpu_feature32(feat) (system_cpuinfo.pfr32.feat) -#define boot_dbg_feature32(feat) (system_cpuinfo.dbg32.feat) - -#define cpu_has_arm (boot_cpu_feature32(arm) == 1) -#define cpu_has_thumb (boot_cpu_feature32(thumb) >= 1) -#define cpu_has_thumb2 (boot_cpu_feature32(thumb) >= 3) -#define cpu_has_jazelle (boot_cpu_feature32(jazelle) > 0) -#define cpu_has_thumbee (boot_cpu_feature32(thumbee) == 1) -#define cpu_has_aarch32 (cpu_has_arm || cpu_has_thumb) - -#ifdef CONFIG_ARM_32 -#define cpu_has_gentimer (boot_cpu_feature32(gentimer) == 1) -/* - * On Armv7, the value 0 is used to indicate that PMUv2 is not - * supported. IOW this doesn't tell us whether the PMU is not supported - * (a processor may implement PMUv1). 
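A minimal sketch combining the SSBD helpers above (illustrative; the actual per-context toggling is done via the SMCCC firmware interface elsewhere, and the helper name is hypothetical):

static inline bool example_needs_ssbd_flip(void)
{
    /*
     * Only the dynamic ("runtime") mitigation state needs per-context
     * toggling, and only on CPUs whose firmware reported that the
     * callback is required.
     */
    return get_ssbd_state() == ARM_SSBD_RUNTIME &&
           cpu_require_ssbd_mitigation();
}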
- * - * For convenience, we treat 0 as not supported, which matches the - * meaning on Armv8. - */ -#define cpu_has_pmu ((boot_dbg_feature32(perfmon) >= 1) && \ - (boot_dbg_feature32(perfmon) < 15)) -#else -#define cpu_has_gentimer (1) -#define cpu_has_pmu ((boot_dbg_feature64(pmu_ver) >= 1) && \ - (boot_dbg_feature64(pmu_ver) < 15)) -#endif -#define cpu_has_security (boot_cpu_feature32(security) > 0) - -#define ARM64_WORKAROUND_CLEAN_CACHE 0 -#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 -#define ARM32_WORKAROUND_766422 2 -#define ARM64_WORKAROUND_834220 3 -#define LIVEPATCH_FEATURE 4 -#define SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT 5 -#define ARM_HARDEN_BRANCH_PREDICTOR 6 -#define ARM_SSBD 7 -#define ARM_SMCCC_1_1 8 -#define ARM64_WORKAROUND_AT_SPECULATE 9 -#define ARM_WORKAROUND_858921 10 -#define ARM64_WORKAROUND_REPEAT_TLBI 11 - -#define ARM_NCAPS 12 - -#ifndef __ASSEMBLY__ - -#include -#include -#include - -extern DECLARE_BITMAP(cpu_hwcaps, ARM_NCAPS); - -static inline bool cpus_have_cap(unsigned int num) -{ - if ( num >= ARM_NCAPS ) - return false; - - return test_bit(num, cpu_hwcaps); -} - -static inline int cpu_nr_siblings(unsigned int cpu) -{ - return 1; -} - -/* System capability check for constant cap */ -#define cpus_have_const_cap(num) ({ \ - register_t __ret; \ - \ - asm volatile (ALTERNATIVE("mov %0, #0", \ - "mov %0, #1", \ - num) \ - : "=r" (__ret)); \ - \ - unlikely(__ret); \ - }) - -static inline void cpus_set_cap(unsigned int num) -{ - if (num >= ARM_NCAPS) - printk(XENLOG_WARNING "Attempt to set an illegal CPU capability (%d >= %d)\n", - num, ARM_NCAPS); - else - __set_bit(num, cpu_hwcaps); -} - -struct arm_cpu_capabilities { - const char *desc; - u16 capability; - bool (*matches)(const struct arm_cpu_capabilities *); - int (*enable)(void *); /* Called on every active CPU */ - union { - struct { /* To be used for erratum handling only */ - u32 midr_model; - u32 midr_range_min, midr_range_max; - }; - }; -}; - -void update_cpu_capabilities(const struct arm_cpu_capabilities *caps, - const char *info); - -void enable_cpu_capabilities(const struct arm_cpu_capabilities *caps); -int enable_nonboot_cpu_caps(const struct arm_cpu_capabilities *caps); - -/* - * capabilities of CPUs - */ -struct cpuinfo_arm { - union { - register_t bits; - struct { - unsigned long revision:4; - unsigned long part_number:12; - unsigned long architecture:4; - unsigned long variant:4; - unsigned long implementer:8; -#ifdef CONFIG_ARM_64 - unsigned long _res0:32; -#endif - }; - } midr; - union { - register_t bits; - struct { - unsigned long aff0:8; - unsigned long aff1:8; - unsigned long aff2:8; - unsigned long mt:1; /* Multi-thread, iff MP == 1 */ - unsigned long __res0:5; - unsigned long up:1; /* UP system, iff MP == 1 */ - unsigned long mp:1; /* MP extensions */ - -#ifdef CONFIG_ARM_64 - unsigned long aff3:8; - unsigned long __res1:24; -#endif - }; - } mpidr; - -#ifdef CONFIG_ARM_64 - /* 64-bit CPUID registers.
*/ - union { - register_t bits[2]; - struct { - /* PFR0 */ - unsigned long el0:4; - unsigned long el1:4; - unsigned long el2:4; - unsigned long el3:4; - unsigned long fp:4; /* Floating Point */ - unsigned long simd:4; /* Advanced SIMD */ - unsigned long gic:4; /* GIC support */ - unsigned long ras:4; - unsigned long sve:4; - unsigned long sel2:4; - unsigned long mpam:4; - unsigned long amu:4; - unsigned long dit:4; - unsigned long __res0:4; - unsigned long csv2:4; - unsigned long cvs3:4; - - /* PFR1 */ - unsigned long bt:4; - unsigned long ssbs:4; - unsigned long mte:4; - unsigned long ras_frac:4; - unsigned long mpam_frac:4; - unsigned long __res1:44; - }; - } pfr64; - - union { - register_t bits[2]; - struct { - /* DFR0 */ - unsigned long debug_ver:4; - unsigned long trace_ver:4; - unsigned long pmu_ver:4; - unsigned long brps:4; - unsigned long __res0:4; - unsigned long wrps:4; - unsigned long __res1:4; - unsigned long ctx_cmps:4; - unsigned long pms_ver:4; - unsigned long double_lock:4; - unsigned long trace_filt:4; - unsigned long __res2:4; - unsigned long mtpmu:4; - unsigned long __res3:12; - - /* DFR1 */ - unsigned long __res4:64; - }; - } dbg64; - - struct { - register_t bits[2]; - } aux64; - - union { - register_t bits[3]; - struct { - unsigned long pa_range:4; - unsigned long asid_bits:4; - unsigned long bigend:4; - unsigned long secure_ns:4; - unsigned long bigend_el0:4; - unsigned long tgranule_16K:4; - unsigned long tgranule_64K:4; - unsigned long tgranule_4K:4; - unsigned long __res0:32; - - unsigned long hafdbs:4; - unsigned long vmid_bits:4; - unsigned long vh:4; - unsigned long hpds:4; - unsigned long lo:4; - unsigned long pan:4; - unsigned long __res1:8; - unsigned long __res2:32; - - unsigned long __res3:64; - }; - } mm64; - - union { - register_t bits[2]; - struct { - /* ISAR0 */ - unsigned long __res0:4; - unsigned long aes:4; - unsigned long sha1:4; - unsigned long sha2:4; - unsigned long crc32:4; - unsigned long atomic:4; - unsigned long __res1:4; - unsigned long rdm:4; - unsigned long sha3:4; - unsigned long sm3:4; - unsigned long sm4:4; - unsigned long dp:4; - unsigned long fhm:4; - unsigned long ts:4; - unsigned long tlb:4; - unsigned long rndr:4; - - /* ISAR1 */ - unsigned long dpb:4; - unsigned long apa:4; - unsigned long api:4; - unsigned long jscvt:4; - unsigned long fcma:4; - unsigned long lrcpc:4; - unsigned long gpa:4; - unsigned long gpi:4; - unsigned long frintts:4; - unsigned long sb:4; - unsigned long specres:4; - unsigned long bf16:4; - unsigned long dgh:4; - unsigned long i8mm:4; - unsigned long __res2:8; - }; - } isa64; - - struct { - register_t bits[1]; - } zfr64; - - /* - * DCZID is only used to check for incoherent values between cores - * and taint Xen in this case - */ - struct { - register_t bits[1]; - } dczid; - - /* - * CTR is only used to check for different cache types or policies and - * taint Xen in this case - */ - struct { - register_t bits[1]; - } ctr; - -#endif - - /* - * 32-bit CPUID registers. On ARMv8 these describe the properties - * when running in 32-bit mode. 
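These per-register bitfield unions are read through the cpu_feature64()/boot_cpu_feature64() accessors defined at the top of this header. A minimal sketch of the idiom (the helper name is hypothetical; only the macro and field names come from this file, and it assumes the usual Arm ID-register convention that a non-zero field value means the feature is implemented):

    /* Hypothetical helper, illustrative only. */
    static inline bool cpu_supports_sve(const struct cpuinfo_arm *c)
    {
        return cpu_feature64(c, sve) > 0; /* ID_AA64PFR0_EL1.SVE != 0 */
    }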
- */ - union { - register_t bits[3]; - struct { - /* PFR0 */ - unsigned long arm:4; - unsigned long thumb:4; - unsigned long jazelle:4; - unsigned long thumbee:4; - unsigned long csv2:4; - unsigned long amu:4; - unsigned long dit:4; - unsigned long ras:4; -#ifdef CONFIG_ARM_64 - unsigned long __res0:32; -#endif - - /* PFR1 */ - unsigned long progmodel:4; - unsigned long security:4; - unsigned long mprofile:4; - unsigned long virt:4; - unsigned long gentimer:4; - unsigned long sec_frac:4; - unsigned long virt_frac:4; - unsigned long gic:4; -#ifdef CONFIG_ARM_64 - unsigned long __res1:32; -#endif - - /* PFR2 */ - unsigned long csv3:4; - unsigned long ssbs:4; - unsigned long ras_frac:4; - unsigned long __res2:20; -#ifdef CONFIG_ARM_64 - unsigned long __res3:32; -#endif - }; - } pfr32; - - union { - register_t bits[2]; - struct { - /* DFR0 */ - unsigned long copdbg:4; - unsigned long copsdbg:4; - unsigned long mmapdbg:4; - unsigned long coptrc:4; - unsigned long mmaptrc:4; - unsigned long mprofdbg:4; - unsigned long perfmon:4; - unsigned long tracefilt:4; -#ifdef CONFIG_ARM_64 - unsigned long __res0:32; -#endif - - /* DFR1 */ - unsigned long mtpmu:4; - unsigned long __res1:28; -#ifdef CONFIG_ARM_64 - unsigned long __res2:32; -#endif - }; - } dbg32; - - struct { - register_t bits[1]; - } aux32; - - struct { - register_t bits[6]; - } mm32; - - struct { - register_t bits[7]; - } isa32; - - struct { - register_t bits[3]; - } mvfr; -}; - -extern struct cpuinfo_arm system_cpuinfo; - -extern void identify_cpu(struct cpuinfo_arm *); - -#ifdef CONFIG_ARM_64 -extern void update_system_features(const struct cpuinfo_arm *); -#else -static inline void update_system_features(const struct cpuinfo_arm *cpuinfo) -{ - /* Not supported on arm32 */ -} -#endif - -extern struct cpuinfo_arm cpu_data[]; -#define current_cpu_data cpu_data[smp_processor_id()] - -extern struct cpuinfo_arm guest_cpuinfo; - -#endif /* __ASSEMBLY__ */ - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h deleted file mode 100644 index 73e81458e5..0000000000 --- a/xen/include/asm-arm/current.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef __ARM_CURRENT_H__ -#define __ARM_CURRENT_H__ - -#include -#include - -#include - -/* Tell whether the guest vCPU enabled Workaround 2 (i.e variant 4) */ -#define CPUINFO_WORKAROUND_2_FLAG_SHIFT 0 -#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT) - -#ifndef __ASSEMBLY__ - -struct vcpu; - -/* Which VCPU is "current" on this PCPU. 
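The per-pCPU tracking declared just below backs the familiar current macro; a usage sketch, with the helper and the next/prev naming hypothetical (only current and set_current() come from this header):

    /* Illustrative only: hand this pCPU over to another vCPU. */
    static void switch_current_sketch(struct vcpu *next)
    {
        if ( current != next )       /* current is this_cpu(curr_vcpu) */
            set_current(next);       /* update the per-CPU pointer */
    }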
*/ -DECLARE_PER_CPU(struct vcpu *, curr_vcpu); - -#define current (this_cpu(curr_vcpu)) -#define set_current(vcpu) do { current = (vcpu); } while (0) -#define get_cpu_current(cpu) (per_cpu(curr_vcpu, cpu)) - -/* Per-VCPU state that lives at the top of the stack */ -struct cpu_info { - struct cpu_user_regs guest_cpu_user_regs; - unsigned long elr; - uint32_t flags; -}; - -static inline struct cpu_info *get_cpu_info(void) -{ -#ifdef __clang__ - unsigned long sp; - - asm ("mov %0, sp" : "=r" (sp)); -#else - register unsigned long sp asm ("sp"); -#endif - - return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + - STACK_SIZE - sizeof(struct cpu_info)); -} - -#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs) - -#define switch_stack_and_jump(stack, fn) do { \ - asm volatile ("mov sp,%0; b " STR(fn) : : "r" (stack) : "memory" ); \ - unreachable(); \ -} while ( false ) - -#define reset_stack_and_jump(fn) switch_stack_and_jump(get_cpu_info(), fn) - -DECLARE_PER_CPU(unsigned int, cpu_id); - -#define get_processor_id() this_cpu(cpu_id) -#define set_processor_id(id) \ -do { \ - WRITE_SYSREG(__per_cpu_offset[(id)], TPIDR_EL2); \ - this_cpu(cpu_id) = (id); \ -} while ( 0 ) - -#endif - -#endif /* __ARM_CURRENT_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/debugger.h b/xen/include/asm-arm/debugger.h deleted file mode 100644 index ac776efa78..0000000000 --- a/xen/include/asm-arm/debugger.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ARM_DEBUGGER_H__ -#define __ARM_DEBUGGER_H__ - -#define debugger_trap_fatal(v, r) (0) -#define debugger_trap_immediate() ((void) 0) - -#endif /* __ARM_DEBUGGER_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/delay.h b/xen/include/asm-arm/delay.h deleted file mode 100644 index 042907d9d5..0000000000 --- a/xen/include/asm-arm/delay.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ARM_DELAY_H -#define _ARM_DELAY_H - -extern void udelay(unsigned long usecs); - -#endif /* defined(_ARM_DELAY_H) */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/desc.h b/xen/include/asm-arm/desc.h deleted file mode 100644 index a4d02d5eef..0000000000 --- a/xen/include/asm-arm/desc.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ARCH_DESC_H -#define __ARCH_DESC_H - -#endif /* __ARCH_DESC_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/device.h b/xen/include/asm-arm/device.h deleted file mode 100644 index b5d451e087..0000000000 --- a/xen/include/asm-arm/device.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef __ASM_ARM_DEVICE_H -#define __ASM_ARM_DEVICE_H - -enum device_type -{ - DEV_DT, - DEV_PCI, -}; - -struct dev_archdata { - void *iommu; /* IOMMU private data */ -}; - -/* struct device - The basic device structure */ -struct device -{ - enum device_type type; -#ifdef CONFIG_HAS_DEVICE_TREE - struct dt_device_node *of_node; /* Used by drivers imported from Linux */ -#endif - struct dev_archdata archdata; - struct iommu_fwspec *iommu_fwspec; /* per-device IOMMU instance data */ -}; - -typedef struct device device_t; - -#include - -#define dev_is_pci(dev) ((dev)->type == DEV_PCI) -#define dev_is_dt(dev) ((dev)->type == DEV_DT) - -enum device_class -{ - 
DEVICE_SERIAL, - DEVICE_IOMMU, - DEVICE_GIC, - DEVICE_PCI_HOSTBRIDGE, - /* Use for error */ - DEVICE_UNKNOWN, -}; - -struct device_desc { - /* Device name */ - const char *name; - /* Device class */ - enum device_class class; - /* List of devices supported by this driver */ - const struct dt_device_match *dt_match; - /* - * Device initialization. - * - * -EAGAIN is used to indicate that device probing is deferred. - */ - int (*init)(struct dt_device_node *dev, const void *data); -}; - -struct acpi_device_desc { - /* Device name */ - const char *name; - /* Device class */ - enum device_class class; - /* type of device supported by the driver */ - const int class_type; - /* Device initialization */ - int (*init)(const void *data); -}; - -/** - * acpi_device_init - Initialize a device - * @class: class of the device (serial, network...) - * @data: specific data for initializing the device - * - * Return 0 on success. - */ -int acpi_device_init(enum device_class class, - const void *data, int class_type); - -/** - * device_init - Initialize a device - * @dev: device to initialize - * @class: class of the device (serial, network...) - * @data: specific data for initializing the device - * - * Return 0 on success. - */ -int device_init(struct dt_device_node *dev, enum device_class class, - const void *data); - -/** - * device_get_class - Get the class of the device - * @dev: device to match - * - * Return the device class on success or DEVICE_UNKNOWN on failure - */ -enum device_class device_get_class(const struct dt_device_node *dev); - -#define DT_DEVICE_START(_name, _namestr, _class) \ -static const struct device_desc __dev_desc_##_name __used \ -__section(".dev.info") = { \ - .name = _namestr, \ - .class = _class, \ - -#define DT_DEVICE_END \ -}; - -#define ACPI_DEVICE_START(_name, _namestr, _class) \ -static const struct acpi_device_desc __dev_desc_##_name __used \ -__section(".adev.info") = { \ - .name = _namestr, \ - .class = _class, \ - -#define ACPI_DEVICE_END \ -}; - -#endif /* __ASM_ARM_DEVICE_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/div64.h b/xen/include/asm-arm/div64.h deleted file mode 100644 index 1cd58bc51a..0000000000 --- a/xen/include/asm-arm/div64.h +++ /dev/null @@ -1,250 +0,0 @@ -/* Taken from Linux arch/arm */ -#ifndef __ASM_ARM_DIV64 -#define __ASM_ARM_DIV64 - -#include -#include - -/* - * The semantics of do_div() are: - * - * uint32_t do_div(uint64_t *n, uint32_t base) - * { - * uint32_t remainder = *n % base; - * *n = *n / base; - * return remainder; - * } - * - * In other words, a 64-bit dividend with a 32-bit divisor producing - * a 64-bit result and a 32-bit remainder. To accomplish this optimally - * we call a special __do_div64 helper with a completely non-standard - * calling convention for arguments and results (beware).
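A minimal usage sketch of this interface (helper and values illustrative only; do_div() is a macro that updates its first argument in place and evaluates to the remainder, per the semantics above):

    /* Illustrative only: split a nanosecond count into whole seconds
     * plus a leftover; e.g. 2500000123 ns -> 2 s, 500000123 ns. */
    static uint32_t ns_to_sec(uint64_t *sec, uint64_t ns)
    {
        uint32_t rem = do_div(ns, 1000000000U); /* ns becomes seconds */
        *sec = ns;
        return rem;
    }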
- */ - - -#if BITS_PER_LONG == 64 - -# define do_div(n,base) ({ \ - uint32_t __base = (base); \ - uint32_t __rem; \ - __rem = ((uint64_t)(n)) % __base; \ - (n) = ((uint64_t)(n)) / __base; \ - __rem; \ - }) - -#elif BITS_PER_LONG == 32 - -#ifdef __ARMEB__ -#define __xh "r0" -#define __xl "r1" -#else -#define __xl "r0" -#define __xh "r1" -#endif - -#define __do_div_asm(n, base) \ -({ \ - register unsigned int __base asm("r4") = base; \ - register unsigned long long __n asm("r0") = n; \ - register unsigned long long __res asm("r2"); \ - register unsigned int __rem asm(__xh); \ - asm( __asmeq("%0", __xh) \ - __asmeq("%1", "r2") \ - __asmeq("%2", "r0") \ - __asmeq("%3", "r4") \ - "bl __do_div64" \ - : "=r" (__rem), "=r" (__res) \ - : "r" (__n), "r" (__base) \ - : "ip", "lr", "cc"); \ - n = __res; \ - __rem; \ -}) - -#if __GNUC__ < 4 - -/* - * gcc versions earlier than 4.0 are simply too problematic for the - * optimized implementation below. First there is gcc PR 15089 that - * tends to trigger on more complex constructs, spurious .global __udivsi3 - * are inserted even if none of those symbols are referenced in the - * generated code, and those gcc versions are not able to do constant - * propagation on long long values anyway. - */ -#define do_div(n, base) __do_div_asm(n, base) - -#elif __GNUC__ >= 4 - -#include - -/* - * If the divisor happens to be constant, we determine the appropriate - * inverse at compile time to turn the division into a few inline - * multiplications instead which is much faster. And yet only if compiling - * for ARMv4 or higher (we need umull/umlal) and if the gcc version is - * sufficiently recent to perform proper long long constant propagation. - * (It is unfortunate that gcc doesn't perform all this internally.) - */ -#define do_div(n, base) \ -({ \ - unsigned int __r, __b = (base); \ - if (!__builtin_constant_p(__b) || __b == 0) { \ - /* non-constant divisor (or zero): slow path */ \ - __r = __do_div_asm(n, __b); \ - } else if ((__b & (__b - 1)) == 0) { \ - /* Trivial: __b is constant and a power of 2 */ \ - /* gcc does the right thing with this code. */ \ - __r = n; \ - __r &= (__b - 1); \ - n /= __b; \ - } else { \ - /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \ - /* We rely on the fact that most of this code gets */ \ - /* optimized away at compile time due to constant */ \ - /* propagation and only a couple inline assembly */ \ - /* instructions should remain. Better avoid any */ \ - /* code construct that might prevent that. */ \ - unsigned long long __res, __x, __t, __m, __n = n; \ - unsigned int __c, __p, __z = 0; \ - /* preserve low part of n for remainder computation */ \ - __r = __n; \ - /* determine number of bits to represent __b */ \ - __p = 1 << __div64_fls(__b); \ - /* compute __m = ((__p << 64) + __b - 1) / __b */ \ - __m = (~0ULL / __b) * __p; \ - __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \ - /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \ - __x = ~0ULL / __b * __b - 1; \ - __res = (__m & 0xffffffff) * (__x & 0xffffffff); \ - __res >>= 32; \ - __res += (__m & 0xffffffff) * (__x >> 32); \ - __t = __res; \ - __res += (__x & 0xffffffff) * (__m >> 32); \ - __t = (__res < __t) ? (1ULL << 32) : 0; \ - __res = (__res >> 32) + __t; \ - __res += (__m >> 32) * (__x >> 32); \ - __res /= __p; \ - /* Now sanitize and optimize what we've got.
*/ \ - if (~0ULL % (__b / (__b & -__b)) == 0) { \ - /* those cases can be simplified with: */ \ - __n /= (__b & -__b); \ - __m = ~0ULL / (__b / (__b & -__b)); \ - __p = 1; \ - __c = 1; \ - } else if (__res != __x / __b) { \ - /* We can't get away without a correction */ \ - /* to compensate for bit truncation errors. */ \ - /* To avoid it we'd need an additional bit */ \ - /* to represent __m which would overflow it. */ \ - /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \ - __c = 1; \ - /* Compute __m = (__p << 64) / __b */ \ - __m = (~0ULL / __b) * __p; \ - __m += ((~0ULL % __b + 1) * __p) / __b; \ - } else { \ - /* Reduce __m/__p, and try to clear bit 31 */ \ - /* of __m when possible otherwise that'll */ \ - /* need extra overflow handling later. */ \ - unsigned int __bits = -(__m & -__m); \ - __bits |= __m >> 32; \ - __bits = (~__bits) << 1; \ - /* If __bits == 0 then setting bit 31 is */ \ - /* unavoidable. Simply apply the maximum */ \ - /* possible reduction in that case. */ \ - /* Otherwise the MSB of __bits indicates the */ \ - /* best reduction we should apply. */ \ - if (!__bits) { \ - __p /= (__m & -__m); \ - __m /= (__m & -__m); \ - } else { \ - __p >>= __div64_fls(__bits); \ - __m >>= __div64_fls(__bits); \ - } \ - /* No correction needed. */ \ - __c = 0; \ - } \ - /* Now we have a combination of 2 conditions: */ \ - /* 1) whether or not we need a correction (__c), and */ \ - /* 2) whether or not there might be an overflow in */ \ - /* the cross product (__m & ((1<<63) | (1<<31))) */ \ - /* Select the best insn combination to perform the */ \ - /* actual __m * __n / (__p << 64) operation. */ \ - if (!__c) { \ - asm ( "umull %Q0, %R0, %1, %Q2\n\t" \ - "mov %Q0, #0" \ - : "=&r" (__res) \ - : "r" (__m), "r" (__n) \ - : "cc" ); \ - } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ - __res = __m; \ - asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \ - "mov %Q0, #0" \ - : "+&r" (__res) \ - : "r" (__m), "r" (__n) \ - : "cc" ); \ - } else { \ - asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \ - "cmn %Q0, %Q1\n\t" \ - "adcs %R0, %R0, %R1\n\t" \ - "adc %Q0, %3, #0" \ - : "=&r" (__res) \ - : "r" (__m), "r" (__n), "r" (__z) \ - : "cc" ); \ - } \ - if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \ - asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \ - "umlal %R0, %Q0, %Q1, %R2\n\t" \ - "mov %R0, #0\n\t" \ - "umlal %Q0, %R0, %R1, %R2" \ - : "+&r" (__res) \ - : "r" (__m), "r" (__n) \ - : "cc" ); \ - } else { \ - asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \ - "umlal %R0, %1, %Q2, %R3\n\t" \ - "mov %R0, #0\n\t" \ - "adds %Q0, %1, %Q0\n\t" \ - "adc %R0, %R0, #0\n\t" \ - "umlal %Q0, %R0, %R2, %R3" \ - : "+&r" (__res), "+&r" (__z) \ - : "r" (__m), "r" (__n) \ - : "cc" ); \ - } \ - __res /= __p; \ - /* The remainder can be computed with 32-bit regs */ \ - /* only, and gcc is good at that.
*/ \ - { \ - unsigned int __res0 = __res; \ - unsigned int __b0 = __b; \ - __r -= __res0 * __b0; \ - } \ - /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \ - n = __res; \ - } \ - __r; \ -}) - -/* our own fls implementation to make sure constant propagation is fine */ -#define __div64_fls(bits) \ -({ \ - unsigned int __left = (bits), __nr = 0; \ - if (__left & 0xffff0000) __nr += 16, __left >>= 16; \ - if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \ - if (__left & 0x000000f0) __nr += 4, __left >>= 4; \ - if (__left & 0x0000000c) __nr += 2, __left >>= 2; \ - if (__left & 0x00000002) __nr += 1; \ - __nr; \ -}) - -#endif /* GCC version */ - -#endif /* BITS_PER_LONG */ - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h deleted file mode 100644 index 9b3647587a..0000000000 --- a/xen/include/asm-arm/domain.h +++ /dev/null @@ -1,279 +0,0 @@ -#ifndef __ASM_DOMAIN_H__ -#define __ASM_DOMAIN_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct hvm_domain -{ - uint64_t params[HVM_NR_PARAMS]; -}; - -#ifdef CONFIG_ARM_64 -enum domain_type { - DOMAIN_32BIT, - DOMAIN_64BIT, -}; -#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT) -#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT) -#else -#define is_32bit_domain(d) (1) -#define is_64bit_domain(d) (0) -#endif - -/* The hardware domain has always its memory direct mapped. */ -#define is_domain_direct_mapped(d) is_hardware_domain(d) - -struct vtimer { - struct vcpu *v; - int irq; - struct timer timer; - register_t ctl; - uint64_t cval; -}; - -struct arch_domain -{ -#ifdef CONFIG_ARM_64 - enum domain_type type; -#endif - - /* Virtual MMU */ - struct p2m_domain p2m; - - struct hvm_domain hvm; - - struct vmmio vmmio; - - /* Continuable domain_relinquish_resources(). */ - unsigned int rel_priv; - - struct { - uint64_t offset; - } virt_timer_base; - - struct vgic_dist vgic; - - struct vuart { -#define VUART_BUF_SIZE 128 - char *buf; - int idx; - const struct vuart_info *info; - spinlock_t lock; - } vuart; - - unsigned int evtchn_irq; -#ifdef CONFIG_ACPI - void *efi_acpi_table; - paddr_t efi_acpi_gpa; - paddr_t efi_acpi_len; -#endif - - /* Monitor options */ - struct { - uint8_t privileged_call_enabled : 1; - } monitor; - -#ifdef CONFIG_SBSA_VUART_CONSOLE - struct vpl011 vpl011; -#endif - -#ifdef CONFIG_TEE - void *tee; -#endif -} __cacheline_aligned; - -struct arch_vcpu -{ - struct { -#ifdef CONFIG_ARM_32 - register_t r4; - register_t r5; - register_t r6; - register_t r7; - register_t r8; - register_t r9; - register_t sl; -#else - register_t x19; - register_t x20; - register_t x21; - register_t x22; - register_t x23; - register_t x24; - register_t x25; - register_t x26; - register_t x27; - register_t x28; -#endif - register_t fp; - register_t sp; - register_t pc; - } saved_context; - - void *stack; - - /* - * Points into ->stack, more convenient than doing pointer arith - * all the time. 
- */ - struct cpu_info *cpu_info; - - /* Fault Status */ -#ifdef CONFIG_ARM_32 - uint32_t dfsr; - uint32_t dfar, ifar; -#else - uint64_t far; - uint32_t esr; -#endif - - uint32_t ifsr; /* 32-bit guests only */ - uint32_t afsr0, afsr1; - - /* MMU */ - register_t vbar; - register_t ttbcr; - uint64_t ttbr0, ttbr1; - - uint32_t dacr; /* 32-bit guests only */ - uint64_t par; -#ifdef CONFIG_ARM_32 - uint32_t mair0, mair1; - uint32_t amair0, amair1; -#else - uint64_t mair; - uint64_t amair; -#endif - - /* Control Registers */ - register_t sctlr; - register_t actlr; - uint32_t cpacr; - - uint32_t contextidr; - register_t tpidr_el0; - register_t tpidr_el1; - register_t tpidrro_el0; - - /* HYP configuration */ - register_t hcr_el2; - register_t mdcr_el2; - - uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */ -#ifdef CONFIG_ARM_32 - /* - * ARMv8 only supports a trivial implementation on Jazelle when in AArch32 - * mode and therefore has no extended control registers. - */ - uint32_t joscr, jmcr; -#endif - - /* Floating point */ - struct vfp_state vfp; - - /* CP 15 */ - uint32_t csselr; - register_t vmpidr; - - /* Holds gic context data */ - union gic_state_data gic; - uint64_t lr_mask; - - struct vgic_cpu vgic; - - /* Timer registers */ - register_t cntkctl; - - struct vtimer phys_timer; - struct vtimer virt_timer; - bool vtimer_initialized; - - /* - * The full P2M may require some cleaning (e.g. when emulating - * set/way). As the action can take a long time, it requires - * preemption. It is deferred until we return to the guest, where we can - * more easily check for softirqs and preempt the vCPU safely. - */ - bool need_flush_to_ram; - -} __cacheline_aligned; - -void vcpu_show_execution_state(struct vcpu *); -void vcpu_show_registers(const struct vcpu *); -void vcpu_switch_to_aarch64_mode(struct vcpu *); - -/* - * Due to the restriction of GICv3, the number of vCPUs in AFF0 is - * limited to 16, thus only the first 4 bits of AFF0 are legal. We will - * use the first 2 affinity levels here, expanding the number of vCPUs up - * to 4096 (== 16*256), which is more than the PEs that GIC-500 supports. - * - * Since we don't save information of vCPU's topology (affinity) in - * vMPIDR at the moment, we map the vcpuid to the vMPIDR linearly. - */ -static inline unsigned int vaffinity_to_vcpuid(register_t vaff) -{ - unsigned int vcpuid; - - vaff &= MPIDR_HWID_MASK; - - vcpuid = MPIDR_AFFINITY_LEVEL(vaff, 0); - vcpuid |= MPIDR_AFFINITY_LEVEL(vaff, 1) << 4; - - return vcpuid; -} - -static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid) -{ - register_t vaff; - - /* - * Right now only AFF0 and AFF1 are supported in virtual affinity. - * Since only the first 4 bits in AFF0 are used in GICv3, the - * available bits are 12 (4+8).
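A worked round trip of this linear mapping, computed from the two helpers here (assuming the usual 8-bit-per-level MPIDR affinity layout): vCPU 37 has AFF0 = 37 & 0xf = 5 and AFF1 = 37 >> 4 = 2, i.e. a virtual affinity of 0x205, and the inverse recovers 37. The wrapper function is illustrative only:

    /* Illustrative only: round trip of the vcpuid <-> affinity map. */
    static void check_affinity_map(void)
    {
        ASSERT(vcpuid_to_vaffinity(37) == 0x205); /* AFF1 = 2, AFF0 = 5 */
        ASSERT(vaffinity_to_vcpuid(0x205) == 37);
    }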
- */ - BUILD_BUG_ON(!(MAX_VIRT_CPUS < ((1 << 12)))); - - vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0); - vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1); - - return vaff; -} - -static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void) -{ - return xmalloc(struct vcpu_guest_context); -} - -static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc) -{ - xfree(vgc); -} - -static inline void arch_vcpu_block(struct vcpu *v) {} - -#define arch_vm_assist_valid_mask(d) (1UL << VMASST_TYPE_runstate_update_flag) - -/* vPCI is not available on Arm */ -#define has_vpci(d) ({ (void)(d); false; }) - -#endif /* __ASM_DOMAIN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/domain_build.h b/xen/include/asm-arm/domain_build.h deleted file mode 100644 index 34ceddc995..0000000000 --- a/xen/include/asm-arm/domain_build.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __ASM_DOMAIN_BUILD_H__ -#define __ASM_DOMAIN_BUILD_H__ - -#include -#include - -int map_irq_to_domain(struct domain *d, unsigned int irq, - bool need_mapping, const char *devname); -int make_chosen_node(const struct kernel_info *kinfo); -void evtchn_allocate(struct domain *d); - -#ifndef CONFIG_ACPI -static inline int prepare_acpi(struct domain *d, struct kernel_info *kinfo) -{ - /* Only booting with ACPI will hit here */ - BUG(); - return -EINVAL; -} -#else -int prepare_acpi(struct domain *d, struct kernel_info *kinfo); -#endif -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/early_printk.h b/xen/include/asm-arm/early_printk.h deleted file mode 100644 index 8dc911cf48..0000000000 --- a/xen/include/asm-arm/early_printk.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * printk() for use before the final page tables are setup. - * - * Copyright (C) 2012 Citrix Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ARM_EARLY_PRINTK_H__ -#define __ARM_EARLY_PRINTK_H__ - -#include - -#ifdef CONFIG_EARLY_PRINTK - -/* need to add the uart address offset in page to the fixmap address */ -#define EARLY_UART_VIRTUAL_ADDRESS \ - (FIXMAP_ADDR(FIXMAP_CONSOLE) + (CONFIG_EARLY_UART_BASE_ADDRESS & ~PAGE_MASK)) - -#endif /* !CONFIG_EARLY_PRINTK */ - -#endif diff --git a/xen/include/asm-arm/efibind.h b/xen/include/asm-arm/efibind.h deleted file mode 100644 index 09dca7a8c9..0000000000 --- a/xen/include/asm-arm/efibind.h +++ /dev/null @@ -1,2 +0,0 @@ -#include -#include diff --git a/xen/include/asm-arm/elf.h b/xen/include/asm-arm/elf.h deleted file mode 100644 index 9e436e7556..0000000000 --- a/xen/include/asm-arm/elf.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __ARM_ELF_H__ -#define __ARM_ELF_H__ - -typedef struct { - unsigned long r0; - unsigned long r1; - unsigned long r2; - unsigned long r3; - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long sp; - unsigned long lr; - unsigned long pc; -} ELF_Gregset; - -#endif /* __ARM_ELF_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h deleted file mode 100644 index b14c166ad6..0000000000 --- a/xen/include/asm-arm/event.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef __ASM_EVENT_H__ -#define __ASM_EVENT_H__ - -#include - -void vcpu_kick(struct vcpu *v); -void vcpu_mark_events_pending(struct vcpu *v); -void vcpu_update_evtchn_irq(struct vcpu *v); -void vcpu_block_unless_event_pending(struct vcpu *v); - -static inline int vcpu_event_delivery_is_enabled(struct vcpu *v) -{ - struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs; - return !(regs->cpsr & PSR_IRQ_MASK); -} - -static inline int local_events_need_delivery_nomask(void) -{ - /* XXX: if the first interrupt has already been delivered, we should - * check whether any other interrupts with priority higher than the - * one in GICV_IAR are in the lr_pending queue or in the LR - * registers and return 1 only in that case. - * In practice the guest interrupt handler should run with - * interrupts disabled so this shouldn't be a problem in the general - * case. - */ - if ( vgic_vcpu_pending_irq(current) ) - return 1; - - if ( !vcpu_info(current, evtchn_upcall_pending) ) - return 0; - - return vgic_evtchn_irq_pending(current); -} - -static inline int local_events_need_delivery(void) -{ - if ( !vcpu_event_delivery_is_enabled(current) ) - return 0; - return local_events_need_delivery_nomask(); -} - -static inline void local_event_delivery_enable(void) -{ - struct cpu_user_regs *regs = guest_cpu_user_regs(); - regs->cpsr &= ~PSR_IRQ_MASK; -} - -/* No arch specific virq definition now. Default to global. 
*/ -static inline bool arch_virq_is_global(unsigned int virq) -{ - return true; -} - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/exynos4210-uart.h b/xen/include/asm-arm/exynos4210-uart.h deleted file mode 100644 index e2ab4a43e4..0000000000 --- a/xen/include/asm-arm/exynos4210-uart.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * xen/include/asm-arm/exynos4210-uart.h - * - * Common constant definition between early printk and the UART driver - * for the exynos 4210 UART - * - * Julien Grall - * Copyright (c) 2013 Linaro Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_EXYNOS4210_H -#define __ASM_ARM_EXYNOS4210_H - - -/* - * this value is only valid for UART 2 and UART 3 - * XXX: define per UART - */ -#define FIFO_MAX_SIZE 16 - -/* register addresses */ -#define ULCON (0x00) -#define UCON (0x04) -#define UFCON (0x08) -#define UMCON (0x0c) -#define UTRSTAT (0x10) -#define UERSTAT (0x14) -#define UFSTAT (0x18) -#define UMSTAT (0x1c) -#define UTXH (0x20) -#define URXH (0x24) -#define UBRDIV (0x28) -#define UFRACVAL (0x2c) -#define UINTP (0x30) -#define UINTS (0x34) -#define UINTM (0x38) - -/* UCON */ -#define UCON_RX_IRQ (1 << 0) -#define UCON_TX_IRQ (1 << 2) -#define UCON_RX_TIMEOUT (1 << 7) - -/* - * FIXME: IRQ_LEVEL should be 1 << n but with this value, the IRQ - * handler will never end... 
- */ -#define UCON_RX_IRQ_LEVEL (0 << 8) -#define UCON_TX_IRQ_LEVEL (0 << 9) - -/* ULCON */ -#define ULCON_STOPB_SHIFT 2 -#define ULCON_PARITY_SHIFT 3 - -/* UFCON */ -#define UFCON_FIFO_TX_RESET (1 << 2) -#define UFCON_FIFO_RX_RESET (1 << 1) -#define UFCON_FIFO_RESET (UFCON_FIFO_TX_RESET | UFCON_FIFO_RX_RESET) -#define UFCON_FIFO_EN (1 << 0) - -#define UFCON_FIFO_TX_TRIGGER (0x6 << 8) - -/* UMCON */ -#define UMCON_INT_EN (1 << 3) - -/* UERSTAT */ -#define UERSTAT_OVERRUN (1 << 0) -#define UERSTAT_PARITY (1 << 1) -#define UERSTAT_FRAME (1 << 2) -#define UERSTAT_BREAK (1 << 3) - -/* UFSTAT */ -#define UFSTAT_TX_FULL (1 << 24) -#define UFSTAT_TX_COUNT_SHIFT (16) -#define UFSTAT_TX_COUNT_MASK (0xff << UFSTAT_TX_COUNT_SHIFT) -#define UFSTAT_RX_FULL (1 << 8) -#define UFSTAT_RX_COUNT_SHIFT (0) -#define UFSTAT_RX_COUNT_MASK (0xff << UFSTAT_RX_COUNT_SHIFT) - -/* UTRSTAT */ -#define UTRSTAT_TXFE (1 << 1) -#define UTRSTAT_TXE (1 << 2) - -/* URHX */ -#define URXH_DATA_MASK (0xff) - -/* Interrupt bits (UINTP, UINTS, UINTM) */ -#define UINTM_MODEM (1 << 3) -#define UINTM_TXD (1 << 2) -#define UINTM_ERROR (1 << 1) -#define UINTM_RXD (1 << 0) -#define UINTM_ALLI (UINTM_MODEM | UINTM_TXD | UINTM_ERROR | UINTM_RXD) - -#endif /* __ASM_ARM_EXYNOS4210_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h deleted file mode 100644 index 125a141975..0000000000 --- a/xen/include/asm-arm/flushtlb.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef __ASM_ARM_FLUSHTLB_H__ -#define __ASM_ARM_FLUSHTLB_H__ - -#include - -/* - * Filter the given set of CPUs, removing those that definitely flushed their - * TLB since @page_timestamp. - */ -/* XXX lazy implementation just doesn't clear anything.... */ -static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) {} - -#define tlbflush_current_time() (0) - -static inline void page_set_tlbflush_timestamp(struct page_info *page) -{ - page->tlbflush_timestamp = tlbflush_current_time(); -} - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -/* Flush specified CPUs' TLBs */ -void arch_flush_tlb_mask(const cpumask_t *mask); - -/* - * Flush a range of VA's hypervisor mappings from the TLB of the local - * processor. - */ -static inline void flush_xen_tlb_range_va_local(vaddr_t va, - unsigned long size) -{ - vaddr_t end = va + size; - - dsb(sy); /* Ensure preceding are visible */ - while ( va < end ) - { - __flush_xen_tlb_one_local(va); - va += PAGE_SIZE; - } - dsb(sy); /* Ensure completion of the TLB flush */ - isb(); -} - -/* - * Flush a range of VA's hypervisor mappings from the TLB of all - * processors in the inner-shareable domain. 
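A hypothetical call site for the local variant above: after rewriting a hypervisor PTE, the stale translation must be flushed before the new mapping is relied upon; the leading dsb(sy) in the helper publishes the preceding PTE write, and the trailing dsb/isb pair ensures the flush has completed. The wrapper name is illustrative only:

    /* Illustrative only: drop the local CPU's stale translation for a
     * single remapped hypervisor page. */
    static void flush_one_page_local(vaddr_t va)
    {
        flush_xen_tlb_range_va_local(va, PAGE_SIZE);
    }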
- */ -static inline void flush_xen_tlb_range_va(vaddr_t va, - unsigned long size) -{ - vaddr_t end = va + size; - - dsb(sy); /* Ensure preceding are visible */ - while ( va < end ) - { - __flush_xen_tlb_one(va); - va += PAGE_SIZE; - } - dsb(sy); /* Ensure completion of the TLB flush */ - isb(); -} - -#endif /* __ASM_ARM_FLUSHTLB_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h deleted file mode 100644 index c7f0c343d1..0000000000 --- a/xen/include/asm-arm/gic.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * ARM Generic Interrupt Controller support - * - * Tim Deegan - * Copyright (c) 2011 Citrix Systems. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_GIC_H__ -#define __ASM_ARM_GIC_H__ - -#define NR_GIC_LOCAL_IRQS NR_LOCAL_IRQS -#define NR_GIC_SGI 16 - -#define GICD_CTLR (0x000) -#define GICD_TYPER (0x004) -#define GICD_IIDR (0x008) -#define GICD_IGROUPR (0x080) -#define GICD_IGROUPRN (0x0FC) -#define GICD_ISENABLER (0x100) -#define GICD_ISENABLERN (0x17C) -#define GICD_ICENABLER (0x180) -#define GICD_ICENABLERN (0x1fC) -#define GICD_ISPENDR (0x200) -#define GICD_ISPENDRN (0x27C) -#define GICD_ICPENDR (0x280) -#define GICD_ICPENDRN (0x2FC) -#define GICD_ISACTIVER (0x300) -#define GICD_ISACTIVERN (0x37C) -#define GICD_ICACTIVER (0x380) -#define GICD_ICACTIVERN (0x3FC) -#define GICD_IPRIORITYR (0x400) -#define GICD_IPRIORITYRN (0x7F8) -#define GICD_ITARGETSR (0x800) -#define GICD_ITARGETSR7 (0x81C) -#define GICD_ITARGETSR8 (0x820) -#define GICD_ITARGETSRN (0xBF8) -#define GICD_ICFGR (0xC00) -#define GICD_ICFGR1 (0xC04) -#define GICD_ICFGR2 (0xC08) -#define GICD_ICFGRN (0xCFC) -#define GICD_NSACR (0xE00) -#define GICD_NSACRN (0xEFC) -#define GICD_SGIR (0xF00) -#define GICD_CPENDSGIR (0xF10) -#define GICD_CPENDSGIRN (0xF1C) -#define GICD_SPENDSGIR (0xF20) -#define GICD_SPENDSGIRN (0xF2C) -#define GICD_ICPIDR2 (0xFE8) - -#define GICD_SGI_TARGET_LIST_SHIFT (24) -#define GICD_SGI_TARGET_LIST_MASK (0x3UL << GICD_SGI_TARGET_LIST_SHIFT) -#define GICD_SGI_TARGET_LIST (0UL<> 3) /* GICH_LR and GICH_VMCR only support - 5 bits for guest irq priority */ - -#define GICH_LR_PENDING 1 -#define GICH_LR_ACTIVE 2 - -#ifndef __ASSEMBLY__ -#include -#include - -#define DT_COMPAT_GIC_CORTEX_A15 "arm,cortex-a15-gic" - -#define DT_MATCH_GIC_V2 \ - DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_CORTEX_A15), \ - DT_MATCH_COMPATIBLE("arm,cortex-a7-gic"), \ - DT_MATCH_COMPATIBLE("arm,gic-400") - -#define DT_MATCH_GIC_V3 DT_MATCH_COMPATIBLE("arm,gic-v3") - -#ifdef CONFIG_GICV3 -/* - * GICv3 registers that needs to be saved/restored - */ -struct gic_v3 { - register_t vmcr, sre_el1; - register_t apr0[4]; - register_t apr1[4]; - uint64_t lr[16]; -}; -#endif - -/* - * GICv2 register that needs to be saved/restored - * on VCPU context switch - */ -struct gic_v2 { - uint32_t hcr; - uint32_t vmcr; - uint32_t apr; - uint32_t lr[64]; -}; - -/* - * Union to hold underlying hw version context information - */ -union 
gic_state_data { - struct gic_v2 v2; -#ifdef CONFIG_GICV3 - struct gic_v3 v3; -#endif -}; - -/* - * Decode LR register content. - * The LR register format differs between GIC HW versions - */ -struct gic_lr { - /* Virtual IRQ */ - uint32_t virq; - uint8_t priority; - bool active; - bool pending; - bool hw_status; - union - { - /* Only filled when there is a corresponding pIRQ (hw_status = true) */ - struct - { - uint32_t pirq; - } hw; - /* Only filled when there is no corresponding pIRQ (hw_status = false) */ - struct - { - bool eoi; - uint8_t source; /* GICv2 only */ - } virt; - }; -}; - -enum gic_version { - GIC_INVALID = 0, /* the default until explicitly set up */ - GIC_V2, - GIC_V3, -}; - -DECLARE_PER_CPU(uint64_t, lr_mask); - -extern enum gic_version gic_hw_version(void); - -/* Program the IRQ type into the GIC */ -void gic_set_irq_type(struct irq_desc *desc, unsigned int type); - -/* Program the GIC to route an interrupt */ -extern void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority); -extern int gic_route_irq_to_guest(struct domain *, unsigned int virq, - struct irq_desc *desc, - unsigned int priority); - -/* Remove an IRQ passthrough to a guest */ -int gic_remove_irq_from_guest(struct domain *d, unsigned int virq, - struct irq_desc *desc); - -extern void gic_clear_pending_irqs(struct vcpu *v); - -extern void init_maintenance_interrupt(void); -extern void gic_raise_guest_irq(struct vcpu *v, unsigned int irq, - unsigned int priority); -extern void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq); - -/* Accept an interrupt from the GIC and dispatch its handler */ -extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq); -/* Find the interrupt controller and set up the callback to translate - * device tree IRQ.
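The bring-up entry points declared just below are expected to run in a fixed order on the boot CPU; a sketch of that order, inferred from the per-function comments rather than quoted from the actual boot path:

    /* Illustrative boot-CPU ordering only. */
    static void gic_bringup_sketch(void)
    {
        gic_preinit();  /* find the GIC in DT/ACPI and pick a driver */
        gic_init();     /* bring up distributor + boot CPU interface */
        /* each secondary CPU then calls gic_init_secondary_cpu() */
    }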
- */ -extern void gic_preinit(void); -/* Bring up the interrupt controller, and report # cpus attached */ -extern void gic_init(void); -/* Bring up a secondary CPU's per-CPU GIC interface */ -extern void gic_init_secondary_cpu(void); -/* Take down a CPU's per-CPU GIC interface */ -extern void gic_disable_cpu(void); -/* setup the gic virtual interface for a guest */ -extern int gicv_setup(struct domain *d); - -/* Context switch */ -extern void gic_save_state(struct vcpu *v); -extern void gic_restore_state(struct vcpu *v); - -/* SGI (AKA IPIs) */ -enum gic_sgi { - GIC_SGI_EVENT_CHECK, - GIC_SGI_DUMP_STATE, - GIC_SGI_CALL_FUNCTION, - GIC_SGI_MAX, -}; - -/* SGI irq mode types */ -enum gic_sgi_mode { - SGI_TARGET_LIST, - SGI_TARGET_OTHERS, - SGI_TARGET_SELF, -}; - -extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); -extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); -extern void send_SGI_self(enum gic_sgi sgi); -extern void send_SGI_allbutself(enum gic_sgi sgi); - -/* print useful debug info */ -extern void gic_dump_info(struct vcpu *v); -extern void gic_dump_vgic_info(struct vcpu *v); - -/* Number of interrupt lines */ -extern unsigned int gic_number_lines(void); - -/* IRQ translation function for the device tree */ -int gic_irq_xlate(const u32 *intspec, unsigned int intsize, - unsigned int *out_hwirq, unsigned int *out_type); - -struct gic_info { - /* GIC version */ - enum gic_version hw_version; - /* Number of GIC lines supported */ - unsigned int nr_lines; - /* Number of LR registers */ - uint8_t nr_lrs; - /* Maintenance irq number */ - unsigned int maintenance_irq; - /* Pointer to the device tree node representing the interrupt controller */ - const struct dt_device_node *node; -}; - -struct gic_hw_operations { - /* Hold GIC HW information */ - const struct gic_info *info; - /* Initialize the GIC and the boot CPU */ - int (*init)(void); - /* Save GIC registers */ - void (*save_state)(struct vcpu *); - /* Restore GIC registers */ - void (*restore_state)(const struct vcpu *); - /* Dump GIC LR register information */ - void (*dump_state)(const struct vcpu *); - - /* hw_irq_controller to enable/disable/eoi host irq */ - hw_irq_controller *gic_host_irq_type; - - /* hw_irq_controller to enable/disable/eoi guest irq */ - hw_irq_controller *gic_guest_irq_type; - - /* End of Interrupt */ - void (*eoi_irq)(struct irq_desc *irqd); - /* Deactivate/reduce priority of irq */ - void (*deactivate_irq)(struct irq_desc *irqd); - /* Read IRQ id and Ack */ - unsigned int (*read_irq)(void); - /* Force the active state of an IRQ by accessing the distributor */ - void (*set_active_state)(struct irq_desc *irqd, bool state); - /* Force the pending state of an IRQ by accessing the distributor */ - void (*set_pending_state)(struct irq_desc *irqd, bool state); - /* Set IRQ type */ - void (*set_irq_type)(struct irq_desc *desc, unsigned int type); - /* Set IRQ priority */ - void (*set_irq_priority)(struct irq_desc *desc, unsigned int priority); - /* Send SGI */ - void (*send_SGI)(enum gic_sgi sgi, enum gic_sgi_mode irqmode, - const cpumask_t *online_mask); - /* Disable CPU physical and virtual interfaces */ - void (*disable_interface)(void); - /* Update LR register with state and priority */ - void (*update_lr)(int lr, unsigned int virq, uint8_t priority, - unsigned int hw_irq, unsigned int state); - /* Update HCR status register */ - void (*update_hcr_status)(uint32_t flag, bool set); - /* Clear LR register */ - void (*clear_lr)(int lr); - /* Read LR register and populate gic_lr 
structure */ - void (*read_lr)(int lr, struct gic_lr *); - /* Write LR register from gic_lr structure */ - void (*write_lr)(int lr, const struct gic_lr *); - /* Read VMCR priority */ - unsigned int (*read_vmcr_priority)(void); - /* Read APRn register */ - unsigned int (*read_apr)(int apr_reg); - /* Query the pending state of an interrupt at the distributor level. */ - bool (*read_pending_state)(struct irq_desc *irqd); - /* Secondary CPU init */ - int (*secondary_init)(void); - /* Create GIC node for the hardware domain */ - int (*make_hwdom_dt_node)(const struct domain *d, - const struct dt_device_node *gic, void *fdt); -#ifdef CONFIG_ACPI - /* Create MADT table for the hardware domain */ - int (*make_hwdom_madt)(const struct domain *d, u32 offset); - /* Query the size of hardware domain madt table */ - unsigned long (*get_hwdom_extra_madt_size)(const struct domain *d); -#endif - /* Map extra GIC MMIO, irqs and other hw resources to the hardware domain. */ - int (*map_hwdom_extra_mappings)(struct domain *d); - /* Deny access to GIC regions */ - int (*iomem_deny_access)(const struct domain *d); - /* Handle LPIs, which require special handling */ - void (*do_LPI)(unsigned int lpi); -}; - -extern const struct gic_hw_operations *gic_hw_ops; - -static inline unsigned int gic_get_nr_lrs(void) -{ - return gic_hw_ops->info->nr_lrs; -} - -/* - * Set the active state of an IRQ. This should be used with care, as this - * directly forces the active bit, without considering the GIC state machine. - * For private IRQs this only works for those of the current CPU. - * - * This function should only be called for interrupts routed to the - * guest. The flow of interrupts routed to Xen is not able to cope with - * software changes to the active state. - */ -static inline void gic_set_active_state(struct irq_desc *irqd, bool state) -{ - ASSERT(test_bit(_IRQ_GUEST, &irqd->status)); - gic_hw_ops->set_active_state(irqd, state); -} - -/* - * Set the pending state of an IRQ. This should be used with care, as this - * directly forces the pending bit, without considering the GIC state machine. - * For private IRQs this only works for those of the current CPU. - */ -static inline void gic_set_pending_state(struct irq_desc *irqd, bool state) -{ - gic_hw_ops->set_pending_state(irqd, state); -} - -/* - * Read the pending state of an interrupt from the distributor. - * For private IRQs this only works for those of the current CPU. - */ -static inline bool gic_read_pending_state(struct irq_desc *irqd) -{ - return gic_hw_ops->read_pending_state(irqd); -} - -void register_gic_ops(const struct gic_hw_operations *ops); -int gic_make_hwdom_dt_node(const struct domain *d, - const struct dt_device_node *gic, - void *fdt); - -#ifdef CONFIG_ACPI -int gic_make_hwdom_madt(const struct domain *d, u32 offset); -unsigned long gic_get_hwdom_madt_size(const struct domain *d); -#endif - -int gic_map_hwdom_extra_mappings(struct domain *d); -int gic_iomem_deny_access(const struct domain *d); - -#endif /* __ASSEMBLY__ */ -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h deleted file mode 100644 index 34ed5f857d..0000000000 --- a/xen/include/asm-arm/gic_v3_defs.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * ARM Generic Interrupt Controller v3 definitions - * - * Vijaya Kumar K - * Copyright (c) 2014 Cavium Inc.
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_GIC_V3_DEFS_H__ -#define __ASM_ARM_GIC_V3_DEFS_H__ - -#include - -/* - * Additional registers defined in GIC v3. - * Common GICD registers are defined in gic.h - */ - -#define GICD_STATUSR (0x010) -#define GICD_SETSPI_NSR (0x040) -#define GICD_CLRSPI_NSR (0x048) -#define GICD_SETSPI_SR (0x050) -#define GICD_CLRSPI_SR (0x058) -#define GICD_IROUTER (0x6000) -#define GICD_IROUTER32 (0x6100) -#define GICD_IROUTER1019 (0x7FD8) -#define GICD_PIDR2 (0xFFE8) - -/* Common between GICD_PIDR2 and GICR_PIDR2 */ -#define GIC_PIDR2_ARCH_MASK (0xf0) -#define GIC_PIDR2_ARCH_GICv3 (0x30) -#define GIC_PIDR2_ARCH_GICv4 (0x40) - -#define GICC_SRE_EL2_SRE (1UL << 0) -#define GICC_SRE_EL2_DFB (1UL << 1) -#define GICC_SRE_EL2_DIB (1UL << 2) -#define GICC_SRE_EL2_ENEL1 (1UL << 3) - -#define GICC_IAR_INTID_MASK (0xFFFFFF) - -/* Additional bits in GICD_TYPER defined by GICv3 */ -#define GICD_TYPE_ID_BITS_SHIFT 19 -#define GICD_TYPE_ID_BITS(r) ((((r) >> GICD_TYPE_ID_BITS_SHIFT) & 0x1f) + 1) - -#define GICD_TYPE_LPIS (1U << 17) - -#define GICD_CTLR_RWP (1UL << 31) -#define GICD_CTLR_ARE_NS (1U << 4) -#define GICD_CTLR_ENABLE_G1A (1U << 1) -#define GICD_CTLR_ENABLE_G1 (1U << 0) -#define GICD_IROUTER_SPI_MODE_ANY (1UL << 31) - -#define GICC_CTLR_EL1_EOImode_drop (1U << 1) - -#define GICR_WAKER_ProcessorSleep (1U << 1) -#define GICR_WAKER_ChildrenAsleep (1U << 2) - -#define GICR_SYNCR_NOT_BUSY 1 -/* - * Implementation defined value JEP106? - * use physical hw value for now - */ -#define GICV3_GICD_IIDR_VAL 0x34c -#define GICV3_GICR_IIDR_VAL GICV3_GICD_IIDR_VAL - -/* Two pages for the RD_base and SGI_base register frame. 
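Each redistributor thus occupies two consecutive 64KiB frames, RD_base followed by SGI_base, so a contiguous redistributor region is walked in strides of GICV3_GICR_SIZE (defined just below). A hedged sketch, with the helper, region_base and n as hypothetical inputs:

    /* Illustrative only: frames of CPU n's redistributor in a
     * contiguous (non single_rdist) region. */
    static void rdist_frames(paddr_t region_base, unsigned int n,
                             paddr_t *rd_base, paddr_t *sgi_base)
    {
        *rd_base  = region_base + n * GICV3_GICR_SIZE;
        *sgi_base = *rd_base + SZ_64K;
    }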
*/ -#define GICV3_GICR_SIZE (2 * SZ_64K) - -#define GICR_CTLR (0x0000) -#define GICR_IIDR (0x0004) -#define GICR_TYPER (0x0008) -#define GICR_STATUSR (0x0010) -#define GICR_WAKER (0x0014) -#define GICR_SETLPIR (0x0040) -#define GICR_CLRLPIR (0x0048) -#define GICR_PROPBASER (0x0070) -#define GICR_PENDBASER (0x0078) -#define GICR_INVLPIR (0x00A0) -#define GICR_INVALLR (0x00B0) -#define GICR_SYNCR (0x00C0) -#define GICR_PIDR2 GICD_PIDR2 - -/* GICR for SGI's & PPI's */ - -#define GICR_IGROUPR0 (0x0080) -#define GICR_ISENABLER0 (0x0100) -#define GICR_ICENABLER0 (0x0180) -#define GICR_ISPENDR0 (0x0200) -#define GICR_ICPENDR0 (0x0280) -#define GICR_ISACTIVER0 (0x0300) -#define GICR_ICACTIVER0 (0x0380) -#define GICR_IPRIORITYR0 (0x0400) -#define GICR_IPRIORITYR7 (0x041C) -#define GICR_ICFGR0 (0x0C00) -#define GICR_ICFGR1 (0x0C04) -#define GICR_IGRPMODR0 (0x0D00) -#define GICR_NSACR (0x0E00) - -#define GICR_CTLR_ENABLE_LPIS (1U << 0) - -#define GICR_TYPER_PLPIS (1U << 0) -#define GICR_TYPER_VLPIS (1U << 1) -#define GICR_TYPER_LAST (1U << 4) -#define GICR_TYPER_PROC_NUM_SHIFT 8 -#define GICR_TYPER_PROC_NUM_MASK (0xffff << GICR_TYPER_PROC_NUM_SHIFT) - -/* For specifying the inner cacheability type only */ -#define GIC_BASER_CACHE_nCnB 0ULL -/* For specifying the outer cacheability type only */ -#define GIC_BASER_CACHE_SameAsInner 0ULL -#define GIC_BASER_CACHE_nC 1ULL -#define GIC_BASER_CACHE_RaWt 2ULL -#define GIC_BASER_CACHE_RaWb 3ULL -#define GIC_BASER_CACHE_WaWt 4ULL -#define GIC_BASER_CACHE_WaWb 5ULL -#define GIC_BASER_CACHE_RaWaWt 6ULL -#define GIC_BASER_CACHE_RaWaWb 7ULL -#define GIC_BASER_CACHE_MASK 7ULL - -#define GIC_BASER_NonShareable 0ULL -#define GIC_BASER_InnerShareable 1ULL -#define GIC_BASER_OuterShareable 2ULL - -#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT 56 -#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ - (7UL << GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT) -#define GICR_PROPBASER_SHAREABILITY_SHIFT 10 -#define GICR_PROPBASER_SHAREABILITY_MASK \ - (3UL << GICR_PROPBASER_SHAREABILITY_SHIFT) -#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT 7 -#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ - (7UL << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT) -#define GICR_PROPBASER_RES0_MASK \ - (GENMASK(63, 59) | GENMASK(55, 52) | GENMASK(6, 5)) - -#define GICR_PENDBASER_SHAREABILITY_SHIFT 10 -#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT 7 -#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT 56 -#define GICR_PENDBASER_SHAREABILITY_MASK \ - (3UL << GICR_PENDBASER_SHAREABILITY_SHIFT) -#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ - (7UL << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT) -#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ - (7UL << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT) -#define GICR_PENDBASER_PTZ BIT(62, UL) -#define GICR_PENDBASER_RES0_MASK \ - (BIT(63, UL) | GENMASK(61, 59) | GENMASK(55, 52) | \ - GENMASK(15, 12) | GENMASK(6, 0)) - -#define DEFAULT_PMR_VALUE 0xff - -#define LPI_PROP_PRIO_MASK 0xfc -#define LPI_PROP_RES1 (1 << 1) -#define LPI_PROP_ENABLED (1 << 0) - -#define ICH_VMCR_EOI (1 << 9) -#define ICH_VMCR_VENG1 (1 << 1) -#define ICH_VMCR_PRIORITY_MASK 0xff -#define ICH_VMCR_PRIORITY_SHIFT 24 - -#define ICH_LR_VIRTUAL_MASK 0xffff -#define ICH_LR_VIRTUAL_SHIFT 0 -#define ICH_LR_CPUID_MASK 0x7 -#define ICH_LR_CPUID_SHIFT 10 -#define ICH_LR_PHYSICAL_MASK 0x3ff -#define ICH_LR_PHYSICAL_SHIFT 32 -#define ICH_LR_STATE_MASK 0x3 -#define ICH_LR_STATE_SHIFT 62 -#define ICH_LR_STATE_PENDING (1ULL << 62) -#define ICH_LR_STATE_ACTIVE (1ULL << 63) -#define ICH_LR_PRIORITY_MASK 0xff -#define 
ICH_LR_PRIORITY_SHIFT 48 -#define ICH_LR_HW_MASK 0x1 -#define ICH_LR_HW_SHIFT 61 -#define ICH_LR_GRP_MASK 0x1 -#define ICH_LR_GRP_SHIFT 60 -#define ICH_LR_MAINTENANCE_IRQ (1UL<<41) -#define ICH_LR_GRP1 (1UL<<60) -#define ICH_LR_HW (1UL<<61) - -#define ICH_VTR_NRLRGS 0x3f -#define ICH_VTR_PRIBITS_MASK 0x7 -#define ICH_VTR_PRIBITS_SHIFT 29 - -#define ICH_SGI_IRQMODE_SHIFT 40 -#define ICH_SGI_IRQMODE_MASK 0x1 -#define ICH_SGI_TARGET_OTHERS 1UL -#define ICH_SGI_TARGET_LIST 0 -#define ICH_SGI_IRQ_SHIFT 24 -#define ICH_SGI_IRQ_MASK 0xf -#define ICH_SGI_TARGETLIST_MASK 0xffff -#define ICH_SGI_AFFx_MASK 0xff -#define ICH_SGI_AFFINITY_LEVEL(x) (16 * (x)) - -struct rdist_region { - paddr_t base; - paddr_t size; - void __iomem *map_base; - bool single_rdist; -}; - -#endif /* __ASM_ARM_GIC_V3_DEFS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h deleted file mode 100644 index 94e5cb99c5..0000000000 --- a/xen/include/asm-arm/gic_v3_its.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * ARM GICv3 ITS support - * - * Andre Przywara - * Copyright (c) 2016,2017 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; under version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see <http://www.gnu.org/licenses/>.
- */ - -#ifndef __ASM_ARM_ITS_H__ -#define __ASM_ARM_ITS_H__ - -#define GITS_CTLR 0x000 -#define GITS_IIDR 0x004 -#define GITS_TYPER 0x008 -#define GITS_CBASER 0x080 -#define GITS_CWRITER 0x088 -#define GITS_CREADR 0x090 -#define GITS_BASER_NR_REGS 8 -#define GITS_BASER0 0x100 -#define GITS_BASER1 0x108 -#define GITS_BASER2 0x110 -#define GITS_BASER3 0x118 -#define GITS_BASER4 0x120 -#define GITS_BASER5 0x128 -#define GITS_BASER6 0x130 -#define GITS_BASER7 0x138 -#define GITS_PIDR2 GICR_PIDR2 - -/* Register bits */ -#define GITS_VALID_BIT BIT(63, UL) - -#define GITS_CTLR_QUIESCENT BIT(31, UL) -#define GITS_CTLR_ENABLE BIT(0, UL) - -#define GITS_TYPER_PTA BIT(19, UL) -#define GITS_TYPER_DEVIDS_SHIFT 13 -#define GITS_TYPER_DEVIDS_MASK (0x1fUL << GITS_TYPER_DEVIDS_SHIFT) -#define GITS_TYPER_DEVICE_ID_BITS(r) (((r & GITS_TYPER_DEVIDS_MASK) >> \ - GITS_TYPER_DEVIDS_SHIFT) + 1) - -#define GITS_TYPER_IDBITS_SHIFT 8 -#define GITS_TYPER_IDBITS_MASK (0x1fUL << GITS_TYPER_IDBITS_SHIFT) -#define GITS_TYPER_EVENT_ID_BITS(r) (((r & GITS_TYPER_IDBITS_MASK) >> \ - GITS_TYPER_IDBITS_SHIFT) + 1) - -#define GITS_TYPER_ITT_SIZE_SHIFT 4 -#define GITS_TYPER_ITT_SIZE_MASK (0xfUL << GITS_TYPER_ITT_SIZE_SHIFT) -#define GITS_TYPER_ITT_SIZE(r) ((((r) & GITS_TYPER_ITT_SIZE_MASK) >> \ - GITS_TYPER_ITT_SIZE_SHIFT) + 1) -#define GITS_TYPER_PHYSICAL (1U << 0) - -#define GITS_BASER_INDIRECT BIT(62, UL) -#define GITS_BASER_INNER_CACHEABILITY_SHIFT 59 -#define GITS_BASER_TYPE_SHIFT 56 -#define GITS_BASER_TYPE_MASK (7ULL << GITS_BASER_TYPE_SHIFT) -#define GITS_BASER_OUTER_CACHEABILITY_SHIFT 53 -#define GITS_BASER_TYPE_NONE 0UL -#define GITS_BASER_TYPE_DEVICE 1UL -#define GITS_BASER_TYPE_VCPU 2UL -#define GITS_BASER_TYPE_CPU 3UL -#define GITS_BASER_TYPE_COLLECTION 4UL -#define GITS_BASER_TYPE_RESERVED5 5UL -#define GITS_BASER_TYPE_RESERVED6 6UL -#define GITS_BASER_TYPE_RESERVED7 7UL -#define GITS_BASER_ENTRY_SIZE_SHIFT 48 -#define GITS_BASER_ENTRY_SIZE(reg) \ - (((reg >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) -#define GITS_BASER_SHAREABILITY_SHIFT 10 -#define GITS_BASER_PAGE_SIZE_SHIFT 8 -#define GITS_BASER_SIZE_MASK 0xff -#define GITS_BASER_SHAREABILITY_MASK (0x3ULL << GITS_BASER_SHAREABILITY_SHIFT) -#define GITS_BASER_OUTER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_OUTER_CACHEABILITY_SHIFT) -#define GITS_BASER_INNER_CACHEABILITY_MASK (0x7ULL << GITS_BASER_INNER_CACHEABILITY_SHIFT) - -#define GITS_CBASER_SIZE_MASK 0xff - -/* ITS command definitions */ -#define ITS_CMD_SIZE 32 - -#define GITS_CMD_MOVI 0x01 -#define GITS_CMD_INT 0x03 -#define GITS_CMD_CLEAR 0x04 -#define GITS_CMD_SYNC 0x05 -#define GITS_CMD_MAPD 0x08 -#define GITS_CMD_MAPC 0x09 -#define GITS_CMD_MAPTI 0x0a -#define GITS_CMD_MAPI 0x0b -#define GITS_CMD_INV 0x0c -#define GITS_CMD_INVALL 0x0d -#define GITS_CMD_MOVALL 0x0e -#define GITS_CMD_DISCARD 0x0f - -#define ITS_DOORBELL_OFFSET 0x10040 -#define GICV3_ITS_SIZE SZ_128K - -#include -#include - -#define HOST_ITS_FLUSH_CMD_QUEUE (1U << 0) -#define HOST_ITS_USES_PTA (1U << 1) - -/* We allocate LPIs on the hosts in chunks of 32 to reduce handling overhead. 
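Since the chunk size is a power of two, locating an LPI's chunk is plain mask arithmetic on LPI_BLOCK (defined just below); a minimal sketch with hypothetical helper names and a hypothetical host_lpi input:

    /* Illustrative only: first LPI of the 32-entry chunk, and the
     * offset of a given host LPI within it. */
    static uint32_t lpi_block_first(uint32_t host_lpi)
    {
        return host_lpi & ~(LPI_BLOCK - 1);
    }
    static uint32_t lpi_block_offset(uint32_t host_lpi)
    {
        return host_lpi & (LPI_BLOCK - 1);
    }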
*/ -#define LPI_BLOCK 32U - -/* data structure for each hardware ITS */ -struct host_its { - struct list_head entry; - const struct dt_device_node *dt_node; - paddr_t addr; - paddr_t size; - void __iomem *its_base; - unsigned int devid_bits; - unsigned int evid_bits; - unsigned int itte_size; - spinlock_t cmd_lock; - void *cmd_buf; - unsigned int flags; -}; - - -#ifdef CONFIG_HAS_ITS - -extern struct list_head host_its_list; - -#ifdef CONFIG_ACPI -unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, - void *base_ptr); -#endif - -/* Deny iomem access for its */ -int gicv3_its_deny_access(const struct domain *d); - -bool gicv3_its_host_has_its(void); - -unsigned int vgic_v3_its_count(const struct domain *d); - -void gicv3_do_LPI(unsigned int lpi); - -int gicv3_lpi_init_rdist(void __iomem * rdist_base); - -/* Initialize the host structures for LPIs and the host ITSes. */ -int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits); -int gicv3_its_init(void); - -/* Store the physical address and ID for each redistributor as read from DT. */ -void gicv3_set_redist_address(paddr_t address, unsigned int redist_id); -uint64_t gicv3_get_redist_address(unsigned int cpu, bool use_pta); - -/* Map a collection for this host CPU to each host ITS. */ -int gicv3_its_setup_collection(unsigned int cpu); - -/* Initialize and destroy the per-domain parts of the virtual ITS support. */ -int vgic_v3_its_init_domain(struct domain *d); -void vgic_v3_its_free_domain(struct domain *d); - -/* Create the appropriate DT nodes for a hardware domain. */ -int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, - const struct dt_device_node *gic, - void *fdt); - -/* - * Map a device on the host by allocating an ITT on the host (ITS). - * "nr_event" specifies how many events (interrupts) this device will need. - * Setting "valid" to false deallocates the device. - */ -int gicv3_its_map_guest_device(struct domain *d, - paddr_t host_doorbell, uint32_t host_devid, - paddr_t guest_doorbell, uint32_t guest_devid, - uint64_t nr_events, bool valid); - -int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi); -void gicv3_free_host_lpi_block(uint32_t first_lpi); - -void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq); - -struct pending_irq *gicv3_its_get_event_pending_irq(struct domain *d, - paddr_t vdoorbell_address, - uint32_t vdevid, - uint32_t eventid); -int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address, - uint32_t vdevid, uint32_t eventid); -struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell, - uint32_t devid, uint32_t eventid, - uint32_t virt_lpi); -void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id, - uint32_t virt_lpi); - -#else - -#ifdef CONFIG_ACPI -static inline unsigned long gicv3_its_make_hwdom_madt(const struct domain *d, - void *base_ptr) -{ - return 0; -} -#endif - -static inline int gicv3_its_deny_access(const struct domain *d) -{ - return 0; -} - -static inline bool gicv3_its_host_has_its(void) -{ - return false; -} - -static inline unsigned int vgic_v3_its_count(const struct domain *d) -{ - return 0; -} - -static inline void gicv3_do_LPI(unsigned int lpi) -{ - /* We don't enable LPIs without an ITS. 
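 *
 * Editor's note (not from the original header): without an ITS no LPI is
 * ever enabled, so reaching this stub means the GIC delivered an
 * interrupt that cannot exist; a dispatcher would typically route here
 * only after a check such as:
 *
 *   if ( is_lpi(irq) )
 *       gicv3_do_LPI(irq);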
*/ - BUG(); -} - -static inline int gicv3_lpi_init_rdist(void __iomem * rdist_base) -{ - return -ENODEV; -} - -static inline int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits) -{ - return 0; -} - -static inline int gicv3_its_init(void) -{ - return 0; -} - -static inline void gicv3_set_redist_address(paddr_t address, - unsigned int redist_id) -{ -} - -static inline int gicv3_its_setup_collection(unsigned int cpu) -{ - /* We should never get here without an ITS. */ - BUG(); -} - -static inline int vgic_v3_its_init_domain(struct domain *d) -{ - return 0; -} - -static inline void vgic_v3_its_free_domain(struct domain *d) -{ -} - -static inline int gicv3_its_make_hwdom_dt_nodes(const struct domain *d, - const struct dt_device_node *gic, - void *fdt) -{ - return 0; -} - -#endif /* CONFIG_HAS_ITS */ - -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-arm/grant_table.h deleted file mode 100644 index d31a4d6805..0000000000 --- a/xen/include/asm-arm/grant_table.h +++ /dev/null @@ -1,108 +0,0 @@ -#ifndef __ASM_GRANT_TABLE_H__ -#define __ASM_GRANT_TABLE_H__ - -#include -#include -#include -#include - -#include - -#define INITIAL_NR_GRANT_FRAMES 1U -#define GNTTAB_MAX_VERSION 1 - -struct grant_table_arch { - gfn_t *shared_gfn; - gfn_t *status_gfn; -}; - -static inline void gnttab_clear_flags(struct domain *d, - unsigned int mask, uint16_t *addr) -{ - guest_clear_mask16(d, mask, addr); -} - -static inline void gnttab_mark_dirty(struct domain *d, mfn_t mfn) -{ -#ifndef NDEBUG - printk_once(XENLOG_G_WARNING "gnttab_mark_dirty not implemented yet\n"); -#endif -} - -int create_grant_host_mapping(unsigned long gpaddr, mfn_t mfn, - unsigned int flags, unsigned int cache_flags); -#define gnttab_host_mapping_get_page_type(ro, ld, rd) (0) -int replace_grant_host_mapping(unsigned long gpaddr, mfn_t mfn, - unsigned long new_gpaddr, unsigned int flags); -#define gnttab_release_host_mappings(domain) 1 - -/* - * The region used by Xen on the memory will never be mapped in DOM0 - * memory layout. Therefore it can be used for the grant table. - * - * Only use the text section as it's always present and will contain - * enough space for a large grant table - */ -#define gnttab_dom0_frames() \ - min_t(unsigned int, opt_max_grant_frames, PFN_DOWN(_etext - _stext)) - -#define gnttab_init_arch(gt) \ -({ \ - unsigned int ngf_ = (gt)->max_grant_frames; \ - unsigned int nsf_ = grant_to_status_frames(ngf_); \ - \ - (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \ - (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \ - if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn ) \ - { \ - while ( ngf_-- ) \ - (gt)->arch.shared_gfn[ngf_] = INVALID_GFN; \ - while ( nsf_-- ) \ - (gt)->arch.status_gfn[nsf_] = INVALID_GFN; \ - } \ - else \ - gnttab_destroy_arch(gt); \ - (gt)->arch.shared_gfn ? 0 : -ENOMEM; \ -}) - -#define gnttab_destroy_arch(gt) \ - do { \ - XFREE((gt)->arch.shared_gfn); \ - XFREE((gt)->arch.status_gfn); \ - } while ( 0 ) - -#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \ - ({ \ - int rc_ = 0; \ - gfn_t ogfn = gnttab_get_frame_gfn(gt, st, idx); \ - if ( gfn_eq(ogfn, INVALID_GFN) || gfn_eq(ogfn, gfn) || \ - (rc_ = guest_physmap_remove_page((gt)->domain, ogfn, mfn, \ - 0)) == 0 ) \ - ((st) ? (gt)->arch.status_gfn \ - : (gt)->arch.shared_gfn)[idx] = (gfn); \ - rc_; \ - }) - -#define gnttab_get_frame_gfn(gt, st, idx) ({ \ - (st) ? 
gnttab_status_gfn(NULL, gt, idx) \ - : gnttab_shared_gfn(NULL, gt, idx); \ -}) - -#define gnttab_shared_gfn(d, t, i) \ - (((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i]) - -#define gnttab_status_gfn(d, t, i) \ - (((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i]) - -#define gnttab_need_iommu_mapping(d) \ - (is_domain_direct_mapped(d) && is_iommu_enabled(d)) - -#endif /* __ASM_GRANT_TABLE_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/guest_access.h b/xen/include/asm-arm/guest_access.h deleted file mode 100644 index 53766386d3..0000000000 --- a/xen/include/asm-arm/guest_access.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __ASM_ARM_GUEST_ACCESS_H__ -#define __ASM_ARM_GUEST_ACCESS_H__ - -#include -#include - -unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len); -unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from, - unsigned len); -unsigned long raw_copy_from_guest(void *to, const void *from, unsigned len); -unsigned long raw_clear_guest(void *to, unsigned len); - -/* Copy data to guest physical address, then clean the region. */ -unsigned long copy_to_guest_phys_flush_dcache(struct domain *d, - paddr_t phys, - void *buf, - unsigned int len); - -int access_guest_memory_by_ipa(struct domain *d, paddr_t ipa, void *buf, - uint32_t size, bool is_write); - -#define __raw_copy_to_guest raw_copy_to_guest -#define __raw_copy_from_guest raw_copy_from_guest -#define __raw_clear_guest raw_clear_guest - -/* - * Pre-validate a guest handle. - * Allows use of faster __copy_* functions. - */ -/* All ARM guests are paging mode external and hence safe */ -#define guest_handle_okay(hnd, nr) (1) -#define guest_handle_subrange_okay(hnd, first, last) (1) - -#endif /* __ASM_ARM_GUEST_ACCESS_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/guest_atomics.h b/xen/include/asm-arm/guest_atomics.h deleted file mode 100644 index 9e2e96d4ff..0000000000 --- a/xen/include/asm-arm/guest_atomics.h +++ /dev/null @@ -1,148 +0,0 @@ -#ifndef _ARM_GUEST_ATOMICS_H -#define _ARM_GUEST_ATOMICS_H - -#include -#include - -/* - * The guest atomics helpers share the same logic. We first try to use - * the *_timeout version of the operation. If it didn't time out, then we - * successfully updated the memory. Nothing else to do. - * - * If it did time out, then it means we didn't manage to update the - * memory. This is possibly because the guest is misbehaving (i.e. a tight - * store loop) but can also happen for other reasons (i.e. nested Xen). - * In that case pause the domain and retry the operation, this time - * without a timeout. - * - * Note, those helpers rely on other parts of the code to prevent sharing - * a page between Xen and multiple domains. 
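 *
 * Illustrative sketch (editor's addition, not from the original header)
 * of the caller's view; the pause/retry fallback below stays hidden.
 * FLAG is a placeholder for a real bit value:
 *
 *   uint16_t old, new;
 *
 *   do {
 *       old = read_atomic(shared);
 *       new = old | FLAG;
 *   } while ( guest_cmpxchg(d, shared, old, new) != old );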
- */ - -DECLARE_PER_CPU(unsigned int, guest_safe_atomic_max); - -#define guest_bitop(name) \ -static inline void guest_##name(struct domain *d, int nr, volatile void *p) \ -{ \ - perfc_incr(atomics_guest); \ - \ - if ( name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) ) \ - return; \ - \ - perfc_incr(atomics_guest_paused); \ - \ - domain_pause_nosync(d); \ - name(nr, p); \ - domain_unpause(d); \ -} - -#define guest_testop(name) \ -static inline int guest_##name(struct domain *d, int nr, volatile void *p) \ -{ \ - bool succeed; \ - int oldbit; \ - \ - perfc_incr(atomics_guest); \ - \ - succeed = name##_timeout(nr, p, &oldbit, \ - this_cpu(guest_safe_atomic_max)); \ - if ( succeed ) \ - return oldbit; \ - \ - perfc_incr(atomics_guest_paused); \ - \ - domain_pause_nosync(d); \ - oldbit = name(nr, p); \ - domain_unpause(d); \ - \ - return oldbit; \ -} - -guest_bitop(set_bit) -guest_bitop(clear_bit) -guest_bitop(change_bit) - -#undef guest_bitop - -/* test_bit does not use load-store atomic operations */ -#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p)) - -guest_testop(test_and_set_bit) -guest_testop(test_and_clear_bit) -guest_testop(test_and_change_bit) - -#undef guest_testop - -static inline void guest_clear_mask16(struct domain *d, uint16_t mask, - volatile uint16_t *p) -{ - perfc_incr(atomics_guest); - - if ( clear_mask16_timeout(mask, p, this_cpu(guest_safe_atomic_max)) ) - return; - - domain_pause_nosync(d); - clear_mask16(mask, p); - domain_unpause(d); -} - -static inline unsigned long __guest_cmpxchg(struct domain *d, - volatile void *ptr, - unsigned long old, - unsigned long new, - unsigned int size) -{ - unsigned long oldval = old; - - perfc_incr(atomics_guest); - - if ( __cmpxchg_timeout(ptr, &oldval, new, size, - this_cpu(guest_safe_atomic_max)) ) - return oldval; - - perfc_incr(atomics_guest_paused); - - domain_pause_nosync(d); - oldval = __cmpxchg(ptr, old, new, size); - domain_unpause(d); - - return oldval; -} - -#define guest_cmpxchg(d, ptr, o, n) \ - ((__typeof__(*(ptr)))__guest_cmpxchg(d, ptr, \ - (unsigned long)(o),\ - (unsigned long)(n),\ - sizeof (*(ptr)))) - -static inline uint64_t guest_cmpxchg64(struct domain *d, - volatile uint64_t *ptr, - uint64_t old, - uint64_t new) -{ - uint64_t oldval = old; - - perfc_incr(atomics_guest); - - if ( __cmpxchg64_timeout(ptr, &oldval, new, - this_cpu(guest_safe_atomic_max)) ) - return oldval; - - perfc_incr(atomics_guest_paused); - - domain_pause_nosync(d); - oldval = cmpxchg64(ptr, old, new); - domain_unpause(d); - - return oldval; -} - -#endif /* _ARM_GUEST_ATOMICS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/guest_walk.h b/xen/include/asm-arm/guest_walk.h deleted file mode 100644 index 8768ac9894..0000000000 --- a/xen/include/asm-arm/guest_walk.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _XEN_GUEST_WALK_H -#define _XEN_GUEST_WALK_H - -/* Walk the guest's page tables in software. 
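 *
 * Illustrative sketch (editor's addition, not from the original header),
 * assuming a true return means the walk succeeded and both
 * out-parameters were filled:
 *
 *   paddr_t ipa;
 *   unsigned int perms;
 *
 *   if ( guest_walk_tables(v, gva, &ipa, &perms) )
 *       access_guest_memory_by_ipa(v->domain, ipa, buf, size, false);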
*/ -bool guest_walk_tables(const struct vcpu *v, - vaddr_t gva, - paddr_t *ipa, - unsigned int *perms); - -#endif /* _XEN_GUEST_WALK_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/hardirq.h b/xen/include/asm-arm/hardirq.h deleted file mode 100644 index 67b6a673db..0000000000 --- a/xen/include/asm-arm/hardirq.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __ASM_HARDIRQ_H -#define __ASM_HARDIRQ_H - -#include -#include - -typedef struct { - unsigned long __softirq_pending; - unsigned int __local_irq_count; -} __cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -#define in_irq() (local_irq_count(smp_processor_id()) != 0) - -#define irq_enter() (local_irq_count(smp_processor_id())++) -#define irq_exit() (local_irq_count(smp_processor_id())--) - -#endif /* __ASM_HARDIRQ_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/hsr.h b/xen/include/asm-arm/hsr.h deleted file mode 100644 index 9b91b28c48..0000000000 --- a/xen/include/asm-arm/hsr.h +++ /dev/null @@ -1,217 +0,0 @@ -#ifndef __ASM_ARM_HSR_H -#define __ASM_ARM_HSR_H - -#include - -#if defined(CONFIG_ARM_64) -# include -#endif - -/* HSR data abort size definition */ -enum dabt_size { - DABT_BYTE = 0, - DABT_HALF_WORD = 1, - DABT_WORD = 2, - DABT_DOUBLE_WORD = 3, -}; - -union hsr { - register_t bits; - struct { - unsigned long iss:25; /* Instruction Specific Syndrome */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - }; - - /* Common to all conditional exception classes (0x0N, except 0x00). */ - struct hsr_cond { - unsigned long iss:20; /* Instruction Specific Syndrome */ - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } cond; - - struct hsr_wfi_wfe { - unsigned long ti:1; /* Trapped instruction */ - unsigned long sbzp:19; - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } wfi_wfe; - - /* reg, reg0, reg1 are 4 bits on AArch32, the fifth bit is sbzp. 
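 *
 * Illustrative sketch (editor's addition, not from the original header):
 * a trap handler can overlay the raw syndrome and match a coprocessor
 * access while masking out the transfer register; encoding and
 * handle_access() are placeholders:
 *
 *   const union hsr hsr = { .bits = regs->hsr };
 *
 *   if ( (hsr.bits & HSR_CP32_REGS_MASK) == encoding )
 *       handle_access(hsr.cp32.read, hsr.cp32.reg);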
*/ - struct hsr_cp32 { - unsigned long read:1; /* Direction */ - unsigned long crm:4; /* CRm */ - unsigned long reg:5; /* Rt */ - unsigned long crn:4; /* CRn */ - unsigned long op1:3; /* Op1 */ - unsigned long op2:3; /* Op2 */ - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } cp32; /* HSR_EC_CP15_32, CP14_32, CP10 */ - - struct hsr_cp64 { - unsigned long read:1; /* Direction */ - unsigned long crm:4; /* CRm */ - unsigned long reg1:5; /* Rt1 */ - unsigned long reg2:5; /* Rt2 */ - unsigned long sbzp2:1; - unsigned long op1:4; /* Op1 */ - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } cp64; /* HSR_EC_CP15_64, HSR_EC_CP14_64 */ - - struct hsr_cp { - unsigned long coproc:4; /* Number of coproc accessed */ - unsigned long sbz0p:1; - unsigned long tas:1; /* Trapped Advanced SIMD */ - unsigned long res0:14; - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } cp; /* HSR_EC_CP */ - - /* - * This encoding is valid only for ARMv8 (ARM DDI 0487B.a, pages D7-2271 and - * G6-4957). On ARMv7, encoding ISS for EC=0x13 is defined as UNK/SBZP - * (ARM DDI 0406C.c page B3-1431). UNK/SBZP means that hardware implements - * this field as Read-As-Zero. ARMv8 is backwards compatible with ARMv7: - * reading CCKNOWNPASS on ARMv7 will return 0, which means that condition - * check was passed or instruction was unconditional. - */ - struct hsr_smc32 { - unsigned long res0:19; /* Reserved */ - unsigned long ccknownpass:1; /* Instruction passed conditional check */ - unsigned long cc:4; /* Condition Code */ - unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } smc32; /* HSR_EC_SMC32 */ - -#ifdef CONFIG_ARM_64 - struct hsr_sysreg { - unsigned long read:1; /* Direction */ - unsigned long crm:4; /* CRm */ - unsigned long reg:5; /* Rt */ - unsigned long crn:4; /* CRn */ - unsigned long op1:3; /* Op1 */ - unsigned long op2:3; /* Op2 */ - unsigned long op0:2; /* Op0 */ - unsigned long res0:3; - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; - } sysreg; /* HSR_EC_SYSREG */ -#endif - - struct hsr_iabt { - unsigned long ifsc:6; /* Instruction fault status code */ - unsigned long res0:1; /* RES0 */ - unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ - unsigned long res1:1; /* RES0 */ - unsigned long eat:1; /* External abort type */ - unsigned long fnv:1; /* FAR not Valid */ - unsigned long res2:14; - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } iabt; /* HSR_EC_INSTR_ABORT_* */ - - struct hsr_dabt { - unsigned long dfsc:6; /* Data Fault Status Code */ - unsigned long write:1; /* Write / not Read */ - unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ - unsigned long cache:1; /* Cache Maintenance */ - unsigned long eat:1; /* External Abort Type */ - unsigned long fnv:1; /* FAR not Valid */ -#ifdef CONFIG_ARM_32 - unsigned long sbzp0:5; -#else - unsigned long sbzp0:3; - unsigned long ar:1; /* Acquire Release */ - unsigned long sf:1; /* Sixty Four bit register */ -#endif - unsigned long reg:5; /* Register */ - unsigned long sign:1; /* Sign extend */ - unsigned 
long size:2; /* Access Size */ - unsigned long valid:1; /* Syndrome Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } dabt; /* HSR_EC_DATA_ABORT_* */ - - /* Contain the common bits between DABT and IABT */ - struct hsr_xabt { - unsigned long fsc:6; /* Fault status code */ - unsigned long pad1:1; /* Not common */ - unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */ - unsigned long pad2:1; /* Not common */ - unsigned long eat:1; /* External abort type */ - unsigned long fnv:1; /* FAR not Valid */ - unsigned long pad3:14; /* Not common */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } xabt; - -#ifdef CONFIG_ARM_64 - struct hsr_brk { - unsigned long comment:16; /* Comment */ - unsigned long res0:9; - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ - } brk; -#endif -}; - -/* HSR.EC == HSR_CP{15,14,10}_32 */ -#define HSR_CP32_OP2_MASK (0x000e0000) -#define HSR_CP32_OP2_SHIFT (17) -#define HSR_CP32_OP1_MASK (0x0001c000) -#define HSR_CP32_OP1_SHIFT (14) -#define HSR_CP32_CRN_MASK (0x00003c00) -#define HSR_CP32_CRN_SHIFT (10) -#define HSR_CP32_CRM_MASK (0x0000001e) -#define HSR_CP32_CRM_SHIFT (1) -#define HSR_CP32_REGS_MASK (HSR_CP32_OP1_MASK|HSR_CP32_OP2_MASK|\ - HSR_CP32_CRN_MASK|HSR_CP32_CRM_MASK) - -/* HSR.EC == HSR_CP{15,14}_64 */ -#define HSR_CP64_OP1_MASK (0x000f0000) -#define HSR_CP64_OP1_SHIFT (16) -#define HSR_CP64_CRM_MASK (0x0000001e) -#define HSR_CP64_CRM_SHIFT (1) -#define HSR_CP64_REGS_MASK (HSR_CP64_OP1_MASK|HSR_CP64_CRM_MASK) - -/* HSR.EC == HSR_SYSREG */ -#define HSR_SYSREG_OP0_MASK (0x00300000) -#define HSR_SYSREG_OP0_SHIFT (20) -#define HSR_SYSREG_OP1_MASK (0x0001c000) -#define HSR_SYSREG_OP1_SHIFT (14) -#define HSR_SYSREG_CRN_MASK (0x00003c00) -#define HSR_SYSREG_CRN_SHIFT (10) -#define HSR_SYSREG_CRM_MASK (0x0000001e) -#define HSR_SYSREG_CRM_SHIFT (1) -#define HSR_SYSREG_OP2_MASK (0x000e0000) -#define HSR_SYSREG_OP2_SHIFT (17) -#define HSR_SYSREG_REGS_MASK (HSR_SYSREG_OP0_MASK|HSR_SYSREG_OP1_MASK|\ - HSR_SYSREG_CRN_MASK|HSR_SYSREG_CRM_MASK|\ - HSR_SYSREG_OP2_MASK) - -/* HSR.EC == HSR_{HVC32, HVC64, SMC64, SVC32, SVC64} */ -#define HSR_XXC_IMM_MASK (0xffff) - -#endif /* __ASM_ARM_HSR_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/hypercall.h b/xen/include/asm-arm/hypercall.h deleted file mode 100644 index a0c5a31a2f..0000000000 --- a/xen/include/asm-arm/hypercall.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_ARM_HYPERCALL_H__ -#define __ASM_ARM_HYPERCALL_H__ - -#include /* for arch_do_domctl */ -int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); - -long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg); - -long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); - -#endif /* __ASM_ARM_HYPERCALL_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/init.h b/xen/include/asm-arm/init.h deleted file mode 100644 index 5ac8cf8797..0000000000 --- a/xen/include/asm-arm/init.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _XEN_ASM_INIT_H -#define _XEN_ASM_INIT_H - -struct init_info -{ - /* Pointer to the stack, used by head.S when entering in C */ - unsigned char *stack; - /* Logical CPU ID, used by 
start_secondary */ - unsigned int cpuid; -}; - -#endif /* _XEN_ASM_INIT_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/insn.h b/xen/include/asm-arm/insn.h deleted file mode 100644 index 27271e95f9..0000000000 --- a/xen/include/asm-arm/insn.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __ARCH_ARM_INSN -#define __ARCH_ARM_INSN - -#ifndef __ASSEMBLY__ - -#include - -#if defined(CONFIG_ARM_64) -# include -#elif defined(CONFIG_ARM_32) -# include -#else -# error "unknown ARM variant" -#endif - -#endif /* __ASSEMBLY__ */ - -/* On ARM32,64 instructions are always 4 bytes long. */ -#define ARCH_PATCH_INSN_SIZE 4 - -#endif /* !__ARCH_ARM_INSN */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 8 - * indent-tabs-mode: t - * End: - */ diff --git a/xen/include/asm-arm/io.h b/xen/include/asm-arm/io.h deleted file mode 100644 index e426804424..0000000000 --- a/xen/include/asm-arm/io.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_IO_H -#define _ASM_IO_H - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/iocap.h b/xen/include/asm-arm/iocap.h deleted file mode 100644 index 276fefbc59..0000000000 --- a/xen/include/asm-arm/iocap.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __X86_IOCAP_H__ -#define __X86_IOCAP_H__ - -#define cache_flush_permitted(d) \ - (!rangeset_is_empty((d)->iomem_caps)) - -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/iommu.h b/xen/include/asm-arm/iommu.h deleted file mode 100644 index 937edc8373..0000000000 --- a/xen/include/asm-arm/iommu.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . -*/ -#ifndef __ARCH_ARM_IOMMU_H__ -#define __ARCH_ARM_IOMMU_H__ - -struct arch_iommu -{ - /* Private information for the IOMMU drivers */ - void *priv; -}; - -const struct iommu_ops *iommu_get_ops(void); -void iommu_set_ops(const struct iommu_ops *ops); - -/* - * The mapping helpers below should only be used if P2M Table is shared - * between the CPU and the IOMMU. 
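 *
 * Illustrative sketch (editor's addition, not from the original header),
 * assuming the caller accumulates flush flags and issues the IOTLB
 * flush itself through the common iommu_iotlb_flush() interface:
 *
 *   unsigned int flush_flags = 0;
 *   int rc = arm_iommu_map_page(d, dfn, mfn,
 *                               IOMMUF_readable | IOMMUF_writable,
 *                               &flush_flags);
 *
 *   if ( !rc && flush_flags )
 *       rc = iommu_iotlb_flush(d, dfn, 1, flush_flags);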
- */ -int __must_check arm_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, - unsigned int flags, - unsigned int *flush_flags); -int __must_check arm_iommu_unmap_page(struct domain *d, dfn_t dfn, - unsigned int *flush_flags); - -#endif /* __ARCH_ARM_IOMMU_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/iommu_fwspec.h b/xen/include/asm-arm/iommu_fwspec.h deleted file mode 100644 index 5cdb53f8e8..0000000000 --- a/xen/include/asm-arm/iommu_fwspec.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * xen/include/asm-arm/iommu_fwspec.h - * - * Contains a common structure to hold the per-device firmware data and - * declaration of functions used to maintain that data - * - * Based on Linux's iommu_fwspec support you can find at: - * include/linux/iommu.h - * - * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. - * - * Copyright (C) 2019 EPAM Systems Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - */ - -#ifndef __ARCH_ARM_IOMMU_FWSPEC_H__ -#define __ARCH_ARM_IOMMU_FWSPEC_H__ - -/* per-device IOMMU instance data */ -struct iommu_fwspec { - /* this device's IOMMU */ - struct device *iommu_dev; - /* IOMMU driver private data for this device */ - void *iommu_priv; - /* number of associated device IDs */ - unsigned int num_ids; - /* IDs which this device may present to the IOMMU */ - uint32_t ids[]; -}; - -int iommu_fwspec_init(struct device *dev, struct device *iommu_dev); -void iommu_fwspec_free(struct device *dev); -int iommu_fwspec_add_ids(struct device *dev, const uint32_t *ids, - unsigned int num_ids); - -static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) -{ - return dev->iommu_fwspec; -} - -static inline void dev_iommu_fwspec_set(struct device *dev, - struct iommu_fwspec *fwspec) -{ - dev->iommu_fwspec = fwspec; -} - -#endif /* __ARCH_ARM_IOMMU_FWSPEC_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/ioreq.h b/xen/include/asm-arm/ioreq.h deleted file mode 100644 index 50185978d5..0000000000 --- a/xen/include/asm-arm/ioreq.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * ioreq.h: Hardware virtual machine assist interface definitions. - * - * Copyright (c) 2016 Citrix Systems Inc. - * Copyright (c) 2019 Arm ltd. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef __ASM_ARM_IOREQ_H__ -#define __ASM_ARM_IOREQ_H__ - -#ifdef CONFIG_IOREQ_SERVER -enum io_state handle_ioserv(struct cpu_user_regs *regs, struct vcpu *v); -enum io_state try_fwd_ioserv(struct cpu_user_regs *regs, - struct vcpu *v, mmio_info_t *info); -#else -static inline enum io_state handle_ioserv(struct cpu_user_regs *regs, - struct vcpu *v) -{ - return IO_UNHANDLED; -} - -static inline enum io_state try_fwd_ioserv(struct cpu_user_regs *regs, - struct vcpu *v, mmio_info_t *info) -{ - return IO_UNHANDLED; -} -#endif - -static inline bool handle_pio(uint16_t port, unsigned int size, int dir) -{ - /* - * TODO: For Arm64, the main user will be PCI. So this should be - * implemented when we add support for vPCI. - */ - ASSERT_UNREACHABLE(); - return true; -} - -static inline void msix_write_completion(struct vcpu *v) -{ -} - -/* This correlation must not be altered */ -#define IOREQ_STATUS_HANDLED IO_HANDLED -#define IOREQ_STATUS_UNHANDLED IO_UNHANDLED -#define IOREQ_STATUS_RETRY IO_RETRY - -#endif /* __ASM_ARM_IOREQ_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/irq.h b/xen/include/asm-arm/irq.h deleted file mode 100644 index e45d574598..0000000000 --- a/xen/include/asm-arm/irq.h +++ /dev/null @@ -1,109 +0,0 @@ -#ifndef _ASM_HW_IRQ_H -#define _ASM_HW_IRQ_H - -#include -#include - -/* - * These defines correspond to the Xen internal representation of the - * IRQ types. We choose to make them the same as the existing device - * tree definitions for convenience. - */ -#define IRQ_TYPE_NONE DT_IRQ_TYPE_NONE -#define IRQ_TYPE_EDGE_RISING DT_IRQ_TYPE_EDGE_RISING -#define IRQ_TYPE_EDGE_FALLING DT_IRQ_TYPE_EDGE_FALLING -#define IRQ_TYPE_EDGE_BOTH DT_IRQ_TYPE_EDGE_BOTH -#define IRQ_TYPE_LEVEL_HIGH DT_IRQ_TYPE_LEVEL_HIGH -#define IRQ_TYPE_LEVEL_LOW DT_IRQ_TYPE_LEVEL_LOW -#define IRQ_TYPE_LEVEL_MASK DT_IRQ_TYPE_LEVEL_MASK -#define IRQ_TYPE_SENSE_MASK DT_IRQ_TYPE_SENSE_MASK -#define IRQ_TYPE_INVALID DT_IRQ_TYPE_INVALID - -#define NR_VECTORS 256 /* XXX */ - -typedef struct { - DECLARE_BITMAP(_bits,NR_VECTORS); -} vmask_t; - -struct arch_pirq -{ -}; - -struct arch_irq_desc { - unsigned int type; -}; - -#define NR_LOCAL_IRQS 32 - -/* - * This only covers the interrupts that Xen cares about, so SGIs, PPIs and - * SPIs. LPIs are too numerous, also only propagated to guests, so they are - * not included in this number. - */ -#define NR_IRQS 1024 - -#define LPI_OFFSET 8192 - -/* LPIs are always numbered starting at 8192, so 0 is a good invalid case. */ -#define INVALID_LPI 0 - -/* This is a spurious interrupt ID which never makes it into the GIC code. 
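 *
 * Editor's note (not from the original header): 1023 matches the GIC
 * architecture's spurious INTID, which a read of IAR returns when no
 * interrupt is pending, so it can never name a real IRQ:
 *
 *   if ( irq == INVALID_IRQ )
 *       return;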
*/ -#define INVALID_IRQ 1023 - -extern const unsigned int nr_irqs; -#define nr_static_irqs NR_IRQS -#define arch_hwdom_irqs(domid) NR_IRQS - -struct irq_desc; -struct irqaction; - -struct irq_desc *__irq_to_desc(int irq); - -#define irq_to_desc(irq) __irq_to_desc(irq) - -void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq); - -static inline bool is_lpi(unsigned int irq) -{ - return irq >= LPI_OFFSET; -} - -#define domain_pirq_to_irq(d, pirq) (pirq) - -bool is_assignable_irq(unsigned int irq); - -void init_IRQ(void); -void init_secondary_IRQ(void); - -int route_irq_to_guest(struct domain *d, unsigned int virq, - unsigned int irq, const char *devname); -int release_guest_irq(struct domain *d, unsigned int irq); - -void arch_move_irqs(struct vcpu *v); - -#define arch_evtchn_bind_pirq(d, pirq) ((void)((d) + (pirq))) - -/* Set IRQ type for an SPI */ -int irq_set_spi_type(unsigned int spi, unsigned int type); - -int irq_set_type(unsigned int irq, unsigned int type); - -int platform_get_irq(const struct dt_device_node *device, int index); - -void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask); - -/* - * Use this helper in places that need to know whether the IRQ type is - * set by the domain. - */ -bool irq_type_set_by_domain(const struct domain *d); - -#endif /* _ASM_HW_IRQ_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/kernel.h b/xen/include/asm-arm/kernel.h deleted file mode 100644 index 874aa108a7..0000000000 --- a/xen/include/asm-arm/kernel.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Kernel image loading. - * - * Copyright (C) 2011 Citrix Systems, Inc. - */ -#ifndef __ARCH_ARM_KERNEL_H__ -#define __ARCH_ARM_KERNEL_H__ - -#include -#include - -struct kernel_info { -#ifdef CONFIG_ARM_64 - enum domain_type type; -#endif - - struct domain *d; - - void *fdt; /* flat device tree */ - paddr_t unassigned_mem; /* RAM not (yet) assigned to a bank */ - struct meminfo mem; - - /* kernel entry point */ - paddr_t entry; - - /* grant table region */ - paddr_t gnttab_start; - paddr_t gnttab_size; - - /* boot blob load addresses */ - const struct bootmodule *kernel_bootmodule, *initrd_bootmodule, *dtb_bootmodule; - const char* cmdline; - paddr_t dtb_paddr; - paddr_t initrd_paddr; - - /* Enable pl011 emulation */ - bool vpl011; - - /* GIC phandle */ - uint32_t phandle_gic; - - /* loader to use for this kernel */ - void (*load)(struct kernel_info *info); - /* loader specific state */ - union { - struct { - paddr_t kernel_addr; - paddr_t len; -#ifdef CONFIG_ARM_64 - paddr_t text_offset; /* 64-bit Image only */ -#endif - paddr_t start; /* 32-bit zImage only */ - } zimage; - }; -}; - -/* - * Probe the kernel to determine its type and select a loader. - * - * Sets in info: - * ->type - * ->load hook, and sets loader specific variables ->zimage - */ -int kernel_probe(struct kernel_info *info, const struct dt_device_node *domain); - -/* - * Loads the kernel into guest RAM. 
- * - * Expects to be set in info when called: - * ->mem - * ->fdt - * - * Sets in info: - * ->entry - * ->dtb_paddr - * ->initrd_paddr - */ -void kernel_load(struct kernel_info *info); - -#endif /* __ARCH_ARM_KERNEL_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/livepatch.h b/xen/include/asm-arm/livepatch.h deleted file mode 100644 index 026af5e7dc..0000000000 --- a/xen/include/asm-arm/livepatch.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. - * - */ - -#ifndef __XEN_ARM_LIVEPATCH_H__ -#define __XEN_ARM_LIVEPATCH_H__ - -#include /* For SZ_* macros. */ -#include - -/* - * The VA of the hypervisor .text region. We need this as the - * normal VAs are write-protected. - */ -extern void *vmap_of_xen_text; - -/* These ranges are only for unconditional branches. */ -#ifdef CONFIG_ARM_32 -/* ARM32: A4.3 in ARM DDI 0406C.c - we are using only ARM instructions in Xen.*/ -#define ARCH_LIVEPATCH_RANGE SZ_32M -#else -/* ARM64: C1.3.2 in ARM DDI 0487A.j */ -#define ARCH_LIVEPATCH_RANGE SZ_128M -#endif - -#endif /* __XEN_ARM_LIVEPATCH_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h deleted file mode 100644 index e94de2e7d8..0000000000 --- a/xen/include/asm-arm/lpae.h +++ /dev/null @@ -1,257 +0,0 @@ -#ifndef __ARM_LPAE_H__ -#define __ARM_LPAE_H__ - -#ifndef __ASSEMBLY__ - -#include - -/* - * WARNING! Unlike the x86 pagetable code, where l1 is the lowest level and - * l4 is the root of the trie, the ARM pagetables follow ARM's documentation: - * the levels are called first, second &c in the order that the MMU walks them - * (i.e. "first" is the root of the trie). - */ - -/****************************************************************************** - * ARMv7-A LPAE pagetables: 3-level trie, mapping 40-bit input to - * 40-bit output addresses. Tables at all levels have 512 64-bit entries - * (i.e. are 4KB long). - * - * The bit-shuffling that has the permission bits in branch nodes in a - * different place from those in leaf nodes seems to be to allow linear - * pagetable tricks. If we're not doing that then the set of permission - * bits that's not in use in a given node type can be used as - * extra software-defined bits. - */ - -typedef struct __packed { - /* These are used in all kinds of entry. */ - unsigned long valid:1; /* Valid mapping */ - unsigned long table:1; /* == 1 in 4k map entries too */ - - /* - * These ten bits are only used in Block entries and are ignored - * in Table entries. - */ - unsigned long ai:3; /* Attribute Index */ - unsigned long ns:1; /* Not-Secure */ - unsigned long up:1; /* Unprivileged access */ - unsigned long ro:1; /* Read-Only */ - unsigned long sh:2; /* Shareability */ - unsigned long af:1; /* Access Flag */ - unsigned long ng:1; /* Not-Global */ - - /* The base address must be appropriately aligned for Block entries */ - unsigned long long base:36; /* Base address of block or next table */ - unsigned long sbz:4; /* Must be zero */ - - /* - * These seven bits are only used in Block entries and are ignored - * in Table entries. 
- */ - unsigned long contig:1; /* In a block of 16 contiguous entries */ - unsigned long pxn:1; /* Privileged-XN */ - unsigned long xn:1; /* eXecute-Never */ - unsigned long avail:4; /* Ignored by hardware */ - - /* - * These 5 bits are only used in Table entries and are ignored in - * Block entries. - */ - unsigned long pxnt:1; /* Privileged-XN */ - unsigned long xnt:1; /* eXecute-Never */ - unsigned long apt:2; /* Access Permissions */ - unsigned long nst:1; /* Not-Secure */ -} lpae_pt_t; - -/* - * The p2m tables have almost the same layout, but some of the permission - * and cache-control bits are laid out differently (or missing). - */ -typedef struct __packed { - /* These are used in all kinds of entry. */ - unsigned long valid:1; /* Valid mapping */ - unsigned long table:1; /* == 1 in 4k map entries too */ - - /* - * These ten bits are only used in Block entries and are ignored - * in Table entries. - */ - unsigned long mattr:4; /* Memory Attributes */ - unsigned long read:1; /* Read access */ - unsigned long write:1; /* Write access */ - unsigned long sh:2; /* Shareability */ - unsigned long af:1; /* Access Flag */ - unsigned long sbz4:1; - - /* The base address must be appropriately aligned for Block entries */ - unsigned long long base:36; /* Base address of block or next table */ - unsigned long sbz3:4; - - /* - * These seven bits are only used in Block entries and are ignored - * in Table entries. - */ - unsigned long contig:1; /* In a block of 16 contiguous entries */ - unsigned long sbz2:1; - unsigned long xn:1; /* eXecute-Never */ - unsigned long type:4; /* Ignored by hardware. Used to store p2m types */ - - unsigned long sbz1:5; -} lpae_p2m_t; - -/* Permission mask: xn, write, read */ -#define P2M_PERM_MASK (0x00400000000000C0ULL) -#define P2M_CLEAR_PERM(pte) ((pte).bits & ~P2M_PERM_MASK) - -/* - * Walk contains the common bits of p2m and pt entries which are needed to - * simply walk the table (e.g. for debug). - */ -typedef struct __packed { - /* These are used in all kinds of entry. */ - unsigned long valid:1; /* Valid mapping */ - unsigned long table:1; /* == 1 in 4k map entries too */ - - unsigned long pad2:10; - - /* The base address must be appropriately aligned for Block entries */ - unsigned long long base:36; /* Base address of block or next table */ - - unsigned long pad1:16; -} lpae_walk_t; - -typedef union { - uint64_t bits; - lpae_pt_t pt; - lpae_p2m_t p2m; - lpae_walk_t walk; -} lpae_t; - -static inline bool lpae_is_valid(lpae_t pte) -{ - return pte.walk.valid; -} - -/* - * lpae_is_* don't check the valid bit. This gives an opportunity for the - * callers to operate on the entry even if it is not valid. For - * instance, to store information in advance. - */ -static inline bool lpae_is_table(lpae_t pte, unsigned int level) -{ - return (level < 3) && pte.walk.table; -} - -static inline bool lpae_is_mapping(lpae_t pte, unsigned int level) -{ - if ( level == 3 ) - return pte.walk.table; - else - return !pte.walk.table; -} - -static inline bool lpae_is_superpage(lpae_t pte, unsigned int level) -{ - return (level < 3) && lpae_is_mapping(pte, level); -} - -#define lpae_get_mfn(pte) (_mfn((pte).walk.base)) -#define lpae_set_mfn(pte, mfn) ((pte).walk.base = mfn_x(mfn)) - -/* - * AArch64 supports pages with different sizes (4K, 16K, and 64K). - * Provide a set of generic helpers that will compute various - * information based on the page granularity. - * - * Note the parameter 'gs' is the page shift of the granularity used. 
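 *
 * Worked example (editor's addition): with 4K granules, gs = 12, so
 * LPAE_SHIFT_GS(12) = 9 and each table holds 512 entries; a level-2
 * block then spans LEVEL_SIZE_GS(12, 2) = 1 << (9 + 12) = 2 MiB and a
 * level-1 block 1 << (18 + 12) = 1 GiB.
 *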
- * Some macro will evaluate 'gs' twice rather than storing in a - * variable. This is to allow using the macros in assembly. - */ - -/* - * Granularity | PAGE_SHIFT | LPAE_SHIFT - * ------------------------------------- - * 4K | 12 | 9 - * 16K | 14 | 11 - * 64K | 16 | 13 - * - * This is equivalent to LPAE_SHIFT = PAGE_SHIFT - 3 - */ -#define LPAE_SHIFT_GS(gs) ((gs) - 3) -#define LPAE_ENTRIES_GS(gs) (_AC(1, U) << LPAE_SHIFT_GS(gs)) -#define LPAE_ENTRIES_MASK_GS(gs) (LPAE_ENTRIES_GS(gs) - 1) - -#define LEVEL_ORDER_GS(gs, lvl) ((3 - (lvl)) * LPAE_SHIFT_GS(gs)) -#define LEVEL_SHIFT_GS(gs, lvl) (LEVEL_ORDER_GS(gs, lvl) + (gs)) -#define LEVEL_SIZE_GS(gs, lvl) (_AT(paddr_t, 1) << LEVEL_SHIFT_GS(gs, lvl)) - -/* Offset in the table at level 'lvl' */ -#define LPAE_TABLE_INDEX_GS(gs, lvl, addr) \ - (((addr) >> LEVEL_SHIFT_GS(gs, lvl)) & LPAE_ENTRIES_MASK_GS(gs)) - -/* Generate an array @var containing the offset for each level from @addr */ -#define DECLARE_OFFSETS(var, addr) \ - const unsigned int var[4] = { \ - zeroeth_table_offset(addr), \ - first_table_offset(addr), \ - second_table_offset(addr), \ - third_table_offset(addr) \ - } - -#endif /* __ASSEMBLY__ */ - -/* - * These numbers add up to a 48-bit input address space. - * - * On 32-bit the zeroeth level does not exist, therefore the total is - * 39-bits. The ARMv7-A architecture actually specifies a 40-bit input - * address space for the p2m, with an 8K (1024-entry) top-level table. - * However Xen only supports 16GB of RAM on 32-bit ARM systems and - * therefore 39-bits are sufficient. - */ - -#define LPAE_SHIFT 9 -#define LPAE_ENTRIES (_AC(1,U) << LPAE_SHIFT) -#define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1) - -#define THIRD_SHIFT (PAGE_SHIFT) -#define THIRD_ORDER (THIRD_SHIFT - PAGE_SHIFT) -#define THIRD_SIZE (_AT(paddr_t, 1) << THIRD_SHIFT) -#define THIRD_MASK (~(THIRD_SIZE - 1)) -#define SECOND_SHIFT (THIRD_SHIFT + LPAE_SHIFT) -#define SECOND_ORDER (SECOND_SHIFT - PAGE_SHIFT) -#define SECOND_SIZE (_AT(paddr_t, 1) << SECOND_SHIFT) -#define SECOND_MASK (~(SECOND_SIZE - 1)) -#define FIRST_SHIFT (SECOND_SHIFT + LPAE_SHIFT) -#define FIRST_ORDER (FIRST_SHIFT - PAGE_SHIFT) -#define FIRST_SIZE (_AT(paddr_t, 1) << FIRST_SHIFT) -#define FIRST_MASK (~(FIRST_SIZE - 1)) -#define ZEROETH_SHIFT (FIRST_SHIFT + LPAE_SHIFT) -#define ZEROETH_ORDER (ZEROETH_SHIFT - PAGE_SHIFT) -#define ZEROETH_SIZE (_AT(paddr_t, 1) << ZEROETH_SHIFT) -#define ZEROETH_MASK (~(ZEROETH_SIZE - 1)) - -/* Calculate the offsets into the pagetables for a given VA */ -#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT) -#define first_linear_offset(va) ((va) >> FIRST_SHIFT) -#define second_linear_offset(va) ((va) >> SECOND_SHIFT) -#define third_linear_offset(va) ((va) >> THIRD_SHIFT) - -#define TABLE_OFFSET(offs) (_AT(unsigned int, offs) & LPAE_ENTRY_MASK) -#define first_table_offset(va) TABLE_OFFSET(first_linear_offset(va)) -#define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va)) -#define third_table_offset(va) TABLE_OFFSET(third_linear_offset(va)) -#define zeroeth_table_offset(va) TABLE_OFFSET(zeroeth_linear_offset(va)) - -#endif /* __ARM_LPAE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/macros.h b/xen/include/asm-arm/macros.h deleted file mode 100644 index 1aa373760f..0000000000 --- a/xen/include/asm-arm/macros.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __ASM_MACROS_H -#define __ASM_MACROS_H - -#ifndef __ASSEMBLY__ -# error "This 
file should only be included in assembly files" -#endif - - /* - * Speculative barrier - * XXX: Add support for the 'sb' instruction - */ - .macro sb - dsb nsh - isb - .endm - -#if defined (CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - - /* NOP sequence */ - .macro nops, num - .rept \num - nop - .endr - .endm - -#endif /* __ASM_MACROS_H */ diff --git a/xen/include/asm-arm/mem_access.h b/xen/include/asm-arm/mem_access.h deleted file mode 100644 index 35ed0ad154..0000000000 --- a/xen/include/asm-arm/mem_access.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * mem_access.h: architecture specific mem_access handling routines - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef _ASM_ARM_MEM_ACCESS_H -#define _ASM_ARM_MEM_ACCESS_H - -static inline -bool p2m_mem_access_emulate_check(struct vcpu *v, - const struct vm_event_st *rsp) -{ - /* Not supported on ARM. */ - return false; -} - -/* vm_event and mem_access are supported on any ARM guest */ -static inline bool p2m_mem_access_sanity_check(struct domain *d) -{ - return true; -} - -/* - * Send mem event based on the access. Boolean return value indicates if a - * trap needs to be injected into the guest. - */ -bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec); - -struct page_info* -p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag, - const struct vcpu *v); - -#endif /* _ASM_ARM_MEM_ACCESS_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h deleted file mode 100644 index 7b5e7b7f69..0000000000 --- a/xen/include/asm-arm/mm.h +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef __ARCH_ARM_MM__ -#define __ARCH_ARM_MM__ - -#include -#include -#include -#include - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -/* Align Xen to a 2 MiB boundary. */ -#define XEN_PADDR_ALIGN (1 << 21) - -/* - * Per-page-frame information. - * - * Every architecture must ensure the following: - * 1. 'struct page_info' contains a 'struct page_list_entry list'. - * 2. Provide a PFN_ORDER() macro for accessing the order of a free page. - */ -#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) - -struct page_info -{ - /* Each frame can be threaded onto a doubly-linked list. */ - struct page_list_entry list; - - /* Reference count and various PGC_xxx flags and fields. */ - unsigned long count_info; - - /* Context-dependent fields follow... */ - union { - /* Page is in use: ((count_info & PGC_count_mask) != 0). */ - struct { - /* Type reference count and various PGT_xxx flags and fields. */ - unsigned long type_info; - } inuse; - /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */ - union { - struct { - /* - * Index of the first *possibly* unscrubbed page in the buddy. 
- * One more bit than maximum possible order to accommodate - * INVALID_DIRTY_IDX. - */ -#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1) - unsigned long first_dirty:MAX_ORDER + 1; - - /* Do TLBs need flushing for safety before next page use? */ - bool need_tlbflush:1; - -#define BUDDY_NOT_SCRUBBING 0 -#define BUDDY_SCRUBBING 1 -#define BUDDY_SCRUB_ABORT 2 - unsigned long scrub_state:2; - }; - - unsigned long val; - } free; - - } u; - - union { - /* Page is in use, but not as a shadow. */ - struct { - /* Owner of this page (zero if page is anonymous). */ - struct domain *domain; - } inuse; - - /* Page is on a free list. */ - struct { - /* Order-size of the free chunk this page is the head of. */ - unsigned int order; - } free; - - } v; - - union { - /* - * Timestamp from 'TLB clock', used to avoid extra safety flushes. - * Only valid for: a) free pages, and b) pages with zero type count - */ - u32 tlbflush_timestamp; - }; - u64 pad; -}; - -#define PG_shift(idx) (BITS_PER_LONG - (idx)) -#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) - -#define PGT_none PG_mask(0, 1) /* no special uses of this page */ -#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */ -#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */ - - /* Count of uses of this frame as its current type. */ -#define PGT_count_width PG_shift(2) -#define PGT_count_mask ((1UL<count_info&PGC_state) == PGC_state_##st) -/* Page is not reference counted */ -#define _PGC_extra PG_shift(10) -#define PGC_extra PG_mask(1, 10) - -/* Count of references to this frame. */ -#define PGC_count_width PG_shift(10) -#define PGC_count_mask ((1UL<= mfn_x(xenheap_mfn_start) && \ - mfn_ < mfn_x(xenheap_mfn_end)); \ -}) -#else -#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) -#define is_xen_heap_mfn(mfn) \ - (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) -#endif - -#define is_xen_fixed_mfn(mfn) \ - ((mfn_to_maddr(mfn) >= virt_to_maddr(&_start)) && \ - (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1))) - -#define page_get_owner(_p) (_p)->v.inuse.domain -#define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) - -#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma)))) - -#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) -/* PDX of the first page in the frame table. */ -extern unsigned long frametable_base_pdx; - -extern unsigned long max_page; -extern unsigned long total_pages; - -#define PDX_GROUP_SHIFT SECOND_SHIFT - -/* Boot-time pagetable setup */ -extern void setup_pagetables(unsigned long boot_phys_offset); -/* Map FDT in boot pagetable */ -extern void *early_fdt_map(paddr_t fdt_paddr); -/* Remove early mappings */ -extern void remove_early_mappings(void); -/* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the - * new page table */ -extern int init_secondary_pagetables(int cpu); -/* Switch secondary CPUS to its own pagetables and finalise MMU setup */ -extern void mmu_init_secondary_cpu(void); -/* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. - * Base must be 32MB aligned and size a multiple of 32MB. 
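 *
 * Editor's note (not from the original header): in frame-number terms
 * both arguments must therefore be multiples of 32MB worth of pages;
 * a caller-side sanity check could read:
 *
 *   ASSERT(IS_ALIGNED(base_mfn, 32 << (20 - PAGE_SHIFT)));
 *   ASSERT(IS_ALIGNED(nr_mfns, 32 << (20 - PAGE_SHIFT)));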
*/ -extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns); -/* Map a frame table to cover physical addresses ps through pe */ -extern void setup_frametable_mappings(paddr_t ps, paddr_t pe); -/* Map a 4k page in a fixmap entry */ -extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes); -/* Remove a mapping from a fixmap entry */ -extern void clear_fixmap(unsigned map); -/* map a physical range in virtual memory */ -void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned attributes); - -static inline void __iomem *ioremap_nocache(paddr_t start, size_t len) -{ - return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE); -} - -static inline void __iomem *ioremap_cache(paddr_t start, size_t len) -{ - return ioremap_attr(start, len, PAGE_HYPERVISOR); -} - -static inline void __iomem *ioremap_wc(paddr_t start, size_t len) -{ - return ioremap_attr(start, len, PAGE_HYPERVISOR_WC); -} - -/* XXX -- account for base */ -#define mfn_valid(mfn) ({ \ - unsigned long __m_f_n = mfn_x(mfn); \ - likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \ -}) - -/* Convert between machine frame numbers and page-info structures. */ -#define mfn_to_page(mfn) \ - (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx)) -#define page_to_mfn(pg) \ - pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx) - -/* Convert between machine addresses and page-info structures. */ -#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma)) -#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg))) - -/* Convert between frame number and address formats. */ -#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT) -#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT)) -#define paddr_to_pdx(pa) mfn_to_pdx(maddr_to_mfn(pa)) -#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn)) -#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga)) -#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn)) -#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma)) -#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va)) -#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va)) - -/* Page-align address and convert to frame number format */ -#define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr)) - -static inline paddr_t __virt_to_maddr(vaddr_t va) -{ - uint64_t par = va_to_par(va); - return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK); -} -#define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va)) - -#ifdef CONFIG_ARM_32 -static inline void *maddr_to_virt(paddr_t ma) -{ - ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma))); - ma -= mfn_to_maddr(xenheap_mfn_start); - return (void *)(unsigned long) ma + XENHEAP_VIRT_START; -} -#else -static inline void *maddr_to_virt(paddr_t ma) -{ - ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) < - (DIRECTMAP_SIZE >> PAGE_SHIFT)); - return (void *)(XENHEAP_VIRT_START - - (xenheap_base_pdx << PAGE_SHIFT) + - ((ma & ma_va_bottom_mask) | - ((ma & ma_top_mask) >> pfn_pdx_hole_shift))); -} -#endif - -/* - * Translate a guest virtual address to a machine address. - * Return the fault information if the translation has failed else 0. - */ -static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa, - unsigned int flags) -{ - uint64_t par = gva_to_ma_par(va, flags); - if ( par & PAR_F ) - return par; - *pa = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK); - return 0; -} - -/* Convert between Xen-heap virtual addresses and machine addresses. 
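 *
 * Illustrative sketch (editor's addition, not from the original header):
 * these are only valid for xenheap mappings, where the conversion is a
 * round trip:
 *
 *   void *v = alloc_xenheap_page();
 *   paddr_t p = __pa(v);
 *   ASSERT(__va(p) == v);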
*/ -#define __pa(x) (virt_to_maddr(x)) -#define __va(x) (maddr_to_virt(x)) - -/* Convert between Xen-heap virtual addresses and machine frame numbers. */ -#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) -#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT)) - -/* - * We define non-underscored wrappers for above conversion functions. - * These are overriden in various source files while underscored version - * remain intact. - */ -#define virt_to_mfn(va) __virt_to_mfn(va) -#define mfn_to_virt(mfn) __mfn_to_virt(mfn) - -/* Convert between Xen-heap virtual addresses and page-info structures. */ -static inline struct page_info *virt_to_page(const void *v) -{ - unsigned long va = (unsigned long)v; - unsigned long pdx; - - ASSERT(va >= XENHEAP_VIRT_START); - ASSERT(va < xenheap_virt_end); - - pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT; - pdx += mfn_to_pdx(xenheap_mfn_start); - return frame_table + pdx - frametable_base_pdx; -} - -static inline void *page_to_virt(const struct page_info *pg) -{ - return mfn_to_virt(mfn_x(page_to_mfn(pg))); -} - -struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va, - unsigned long flags); - -/* - * Arm does not have an M2P, but common code expects a handful of - * M2P-related defines and functions. Provide dummy versions of these. - */ -#define INVALID_M2P_ENTRY (~0UL) -#define SHARED_M2P_ENTRY (~0UL - 1UL) -#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) - -/* Xen always owns P2M on ARM */ -#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0) -#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn))) - -/* Arch-specific portion of memory_op hypercall. */ -long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg); - -#define domain_set_alloc_bitsize(d) ((void)0) -#define domain_clamp_alloc_bitsize(d, b) (b) - -unsigned long domain_get_maximum_gpfn(struct domain *d); - -#define memguard_guard_stack(_p) ((void)0) -#define memguard_guard_range(_p,_l) ((void)0) -#define memguard_unguard_range(_p,_l) ((void)0) - -/* Release all __init and __initdata ranges to be reused */ -void free_init_memory(void); - -int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, - unsigned int order); - -extern void put_page_type(struct page_info *page); -static inline void put_page_and_type(struct page_info *page) -{ - put_page_type(page); - put_page(page); -} - -void clear_and_clean_page(struct page_info *page); - -unsigned int arch_get_dma_bitsize(void); - -#endif /* __ARCH_ARM_MM__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h deleted file mode 100644 index 7ab873cb8f..0000000000 --- a/xen/include/asm-arm/mmio.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * xen/include/asm-arm/mmio.h - * - * ARM I/O handlers - * - * Copyright (c) 2011 Citrix Systems. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __ASM_ARM_MMIO_H__ -#define __ASM_ARM_MMIO_H__ - -#include -#include - -#include - -#define MAX_IO_HANDLER 16 - -typedef struct -{ - struct hsr_dabt dabt; - paddr_t gpa; -} mmio_info_t; - -enum io_state -{ - IO_ABORT, /* The IO was handled by the helper and led to an abort. */ - IO_HANDLED, /* The IO was successfully handled by the helper. */ - IO_UNHANDLED, /* The IO was not handled by the helper. */ - IO_RETRY, /* Retry the emulation for some reason */ -}; - -typedef int (*mmio_read_t)(struct vcpu *v, mmio_info_t *info, - register_t *r, void *priv); -typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info, - register_t r, void *priv); - -struct mmio_handler_ops { - mmio_read_t read; - mmio_write_t write; -}; - -struct mmio_handler { - paddr_t addr; - paddr_t size; - const struct mmio_handler_ops *ops; - void *priv; -}; - -struct vmmio { - int num_entries; - int max_num_entries; - rwlock_t lock; - struct mmio_handler *handlers; -}; - -enum io_state try_handle_mmio(struct cpu_user_regs *regs, - const union hsr hsr, - paddr_t gpa); -void register_mmio_handler(struct domain *d, - const struct mmio_handler_ops *ops, - paddr_t addr, paddr_t size, void *priv); -int domain_io_init(struct domain *d, int max_count); -void domain_io_free(struct domain *d); - - -#endif /* __ASM_ARM_MMIO_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/monitor.h b/xen/include/asm-arm/monitor.h deleted file mode 100644 index 7567be66bd..0000000000 --- a/xen/include/asm-arm/monitor.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * include/asm-arm/monitor.h - * - * Arch-specific monitor_op domctl handler. - * - * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) - * Copyright (c) 2016, Bitdefender S.R.L. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - */ - -#ifndef __ASM_ARM_MONITOR_H__ -#define __ASM_ARM_MONITOR_H__ - -#include -#include - -static inline -void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace) -{ -} - -static inline -int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop) -{ - /* No arch-specific monitor ops on ARM. */ - return -EOPNOTSUPP; -} - -int arch_monitor_domctl_event(struct domain *d, - struct xen_domctl_monitor_op *mop); - -static inline -int arch_monitor_init_domain(struct domain *d) -{ - /* No arch-specific domain initialization on ARM. */ - return 0; -} - -static inline -void arch_monitor_cleanup_domain(struct domain *d) -{ - /* No arch-specific domain cleanup on ARM. 
*/ -} - -static inline uint32_t arch_monitor_get_capabilities(struct domain *d) -{ - uint32_t capabilities = 0; - - capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST | - 1U << XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL); - - return capabilities; -} - -int monitor_smc(void); - -#endif /* __ASM_ARM_MONITOR_H__ */ diff --git a/xen/include/asm-arm/new_vgic.h b/xen/include/asm-arm/new_vgic.h deleted file mode 100644 index 97d622bff6..0000000000 --- a/xen/include/asm-arm/new_vgic.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright (C) 2015, 2016 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ASM_ARM_NEW_VGIC_H -#define __ASM_ARM_NEW_VGIC_H - -#include -#include -#include -#include -#include - -#define VGIC_V3_MAX_CPUS 255 -#define VGIC_V2_MAX_CPUS 8 -#define VGIC_NR_SGIS 16 -#define VGIC_NR_PPIS 16 -#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) -#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1) -#define VGIC_MAX_SPI 1019 -#define VGIC_MAX_RESERVED 1023 -#define VGIC_MIN_LPI 8192 - -#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) -#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ - (irq) <= VGIC_MAX_SPI) - -enum vgic_type { - VGIC_V2, /* Good ol' GICv2 */ - VGIC_V3, /* New fancy GICv3 */ -}; - -#define VGIC_V2_MAX_LRS (1 << 6) -#define VGIC_V3_MAX_LRS 16 -#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - -#define VGIC_CONFIG_EDGE false -#define VGIC_CONFIG_LEVEL true - -struct vgic_irq { - struct list_head ap_list; - - struct vcpu *vcpu; /* - * SGIs and PPIs: The VCPU - * SPIs and LPIs: The VCPU whose ap_list - * this is queued on. - */ - - struct vcpu *target_vcpu; /* - * The VCPU that this interrupt should - * be sent to, as a result of the - * targets reg (v2) or the affinity reg (v3). - */ - - spinlock_t irq_lock; /* Protects the content of the struct */ - uint32_t intid; /* Guest visible INTID */ - atomic_t refcount; /* Used for LPIs */ - uint32_t hwintid; /* HW INTID number */ - union - { - struct { - uint8_t targets; /* GICv2 target VCPUs mask */ - uint8_t source; /* GICv2 SGIs only */ - }; - uint32_t mpidr; /* GICv3 target VCPU */ - }; - uint8_t priority; - bool line_level:1; /* Level only */ - bool pending_latch:1; /* - * The pending latch state used to - * calculate the pending state for both - * level and edge triggered IRQs. - */ - bool active:1; /* not used for LPIs */ - bool enabled:1; - bool hw:1; /* Tied to HW IRQ */ - bool config:1; /* Level or edge */ - struct list_head lpi_list; /* Used to link all LPIs together */ -}; - -enum iodev_type { - IODEV_DIST, - IODEV_REDIST, -}; - -struct vgic_io_device { - gfn_t base_fn; - struct vcpu *redist_vcpu; - const struct vgic_register_region *regions; - enum iodev_type iodev_type; - unsigned int nr_regions; -}; - -struct vgic_dist { - bool ready; - bool initialized; - - /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ - uint32_t version; - - /* Do injected MSIs require an additional device ID? 
*/ - bool msis_require_devid; - - unsigned int nr_spis; - - /* base addresses in guest physical address space: */ - paddr_t vgic_dist_base; /* distributor */ - union - { - /* either a GICv2 CPU interface */ - paddr_t vgic_cpu_base; - /* or a number of GICv3 redistributor regions */ - struct - { - paddr_t vgic_redist_base; - paddr_t vgic_redist_free_offset; - }; - }; - - /* distributor enabled */ - bool enabled; - - struct vgic_irq *spis; - unsigned long *allocated_irqs; /* bitmap of IRQs allocated */ - - struct vgic_io_device dist_iodev; - - bool has_its; - - /* - * Contains the attributes and gpa of the LPI configuration table. - * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share - * one address across all redistributors. - * GICv3 spec: 6.1.2 "LPI Configuration tables" - */ - uint64_t propbaser; - - /* Protects the lpi_list and the count value below. */ - spinlock_t lpi_list_lock; - struct list_head lpi_list_head; - unsigned int lpi_list_count; -}; - -struct vgic_cpu { - struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; - - struct list_head ap_list_head; - spinlock_t ap_list_lock; /* Protects the ap_list */ - - unsigned int used_lrs; - - /* - * List of IRQs that this VCPU should consider because they are either - * Active or Pending (hence the name; AP list), or because they recently - * were one of the two and need to be migrated off this list to another - * VCPU. - */ - - /* - * Members below are used with GICv3 emulation only and represent - * parts of the redistributor. - */ - struct vgic_io_device rd_iodev; - struct vgic_io_device sgi_iodev; - - /* Contains the attributes and gpa of the LPI pending tables. */ - uint64_t pendbaser; - - bool lpis_enabled; - - /* Cache guest priority bits */ - uint32_t num_pri_bits; - - /* Cache guest interrupt ID bits */ - uint32_t num_id_bits; -}; - -#endif /* __ASM_ARM_NEW_VGIC_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/nospec.h b/xen/include/asm-arm/nospec.h deleted file mode 100644 index 51c7aea4f4..0000000000 --- a/xen/include/asm-arm/nospec.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. */ - -#ifndef _ASM_ARM_NOSPEC_H -#define _ASM_ARM_NOSPEC_H - -static inline bool evaluate_nospec(bool condition) -{ - return condition; -} - -static inline void block_speculation(void) -{ -} - -#endif /* _ASM_ARM_NOSPEC_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h deleted file mode 100644 index 31a6de4e23..0000000000 --- a/xen/include/asm-arm/numa.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __ARCH_ARM_NUMA_H -#define __ARCH_ARM_NUMA_H - -#include - -typedef u8 nodeid_t; - -/* Fake one node for now. See also node_online_map. */ -#define cpu_to_node(cpu) 0 -#define node_to_cpumask(node) (cpu_online_map) - -static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr) -{ - return 0; -} - -/* - * TODO: make first_valid_mfn static when NUMA is supported on Arm, this - * is required because the dummy helpers are using it. 
- */ -extern mfn_t first_valid_mfn; - -/* XXX: implement NUMA support */ -#define node_spanned_pages(nid) (max_page - mfn_x(first_valid_mfn)) -#define node_start_pfn(nid) (mfn_x(first_valid_mfn)) -#define __node_distance(a, b) (20) - -#endif /* __ARCH_ARM_NUMA_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h deleted file mode 100644 index 8f11d9c97b..0000000000 --- a/xen/include/asm-arm/p2m.h +++ /dev/null @@ -1,439 +0,0 @@ -#ifndef _XEN_P2M_H -#define _XEN_P2M_H - -#include -#include -#include -#include - -#include -#include - -#define paddr_bits PADDR_BITS - -/* Holds the bit size of IPAs in p2m tables. */ -extern unsigned int p2m_ipa_bits; - -#ifdef CONFIG_ARM_64 -extern unsigned int p2m_root_order; -extern unsigned int p2m_root_level; -#define P2M_ROOT_ORDER p2m_root_order -#define P2M_ROOT_LEVEL p2m_root_level -#else -/* First level P2M is always 2 consecutive pages */ -#define P2M_ROOT_ORDER 1 -#define P2M_ROOT_LEVEL 1 -#endif - -struct domain; - -extern void memory_type_changed(struct domain *); - -/* Per-p2m-table state */ -struct p2m_domain { - /* - * Lock that protects updates to the p2m. - */ - rwlock_t lock; - - /* Pages used to construct the p2m */ - struct page_list_head pages; - - /* The root of the p2m tree. May be concatenated */ - struct page_info *root; - - /* Current VMID in use */ - uint16_t vmid; - - /* Current Translation Table Base Register for the p2m */ - uint64_t vttbr; - - /* Highest guest frame that's ever been mapped in the p2m */ - gfn_t max_mapped_gfn; - - /* - * Lowest mapped gfn in the p2m. When releasing mapped gfns in a - * preemptible manner this is updated to track where to - * resume the search. Apart from during teardown this can only - * decrease. */ - gfn_t lowest_mapped_gfn; - - /* Indicate if it is required to clean the cache when writing an entry */ - bool clean_pte; - - /* - * P2M updates may require TLBs to be flushed (invalidated). - * - * Flushes may be deferred by setting 'need_flush' and then flushing - * when the p2m write lock is released. - * - * If an immediate flush is required (e.g, if a super page is - * shattered), call p2m_tlb_flush_sync(). - */ - bool need_flush; - - /* Gather some statistics for information purposes only */ - struct { - /* Number of mappings at each p2m tree level */ - unsigned long mappings[4]; - /* Number of times we have shattered a mapping - * at each p2m tree level. */ - unsigned long shattered[4]; - } stats; - - /* - * If true, and an access fault comes in and there is no vm_event listener, - * pause domain. Otherwise, remove access restrictions. - */ - bool access_required; - - /* Defines if mem_access is in use for the domain. */ - bool mem_access_enabled; - - /* - * Default P2M access type for each page in the domain: new pages, - * swapped-in pages, cleared pages, and pages that are ambiguously - * retyped get this access type. See definition of p2m_access_t. - */ - p2m_access_t default_access; - - /* - * Radix tree to store the p2m_access_t settings as the PTEs don't have - * enough available bits to store this information. - */ - struct radix_tree_root mem_access_settings; - - /* back pointer to domain */ - struct domain *domain; - - /* Keeping track of which CPU this p2m was last used on and for which vCPU */ - uint8_t last_vcpu_ran[NR_CPUS]; -}; -
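A hedged sketch (not part of the patch) of how the pieces of struct p2m_domain above are typically used together; p2m_get_hostp2m(), p2m_read_lock(), p2m_get_entry() and p2m_read_unlock() are all declared further down in this header:

static mfn_t example_p2m_lookup(struct domain *d, gfn_t gfn)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_type_t t;
    mfn_t mfn;

    p2m_read_lock(p2m);                      /* takes p2m->lock for reading */
    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL, NULL);
    p2m_read_unlock(p2m);

    return mfn;                              /* INVALID_MFN if gfn is unmapped */
}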
-/* - * List of possible types for each page in the p2m entry. - * The number of available bits per page in the PTE for this purpose is 4, - * so only 16 types are possible. If we run out of values in the future, it - * is possible to use higher values for pseudo-types and not store them in - * the p2m entry. - */ -typedef enum { - p2m_invalid = 0, /* Nothing mapped here */ - p2m_ram_rw, /* Normal read/write guest RAM */ - p2m_ram_ro, /* Read-only; writes are silently dropped */ - p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */ - p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */ - p2m_mmio_direct_c, /* Read/write mapping of genuine MMIO area cacheable */ - p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */ - p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */ - p2m_grant_map_rw, /* Read/write grant mapping */ - p2m_grant_map_ro, /* Read-only grant mapping */ - /* The types below are only used to decide the page attribute in the P2M */ - p2m_iommu_map_rw, /* Read/write iommu mapping */ - p2m_iommu_map_ro, /* Read-only iommu mapping */ - p2m_max_real_type, /* Types after this won't be stored in the p2m */ -} p2m_type_t; - -/* We use bitmaps and masks to handle groups of types */ -#define p2m_to_mask(_t) (1UL << (_t)) - -/* RAM types, which map to real machine frames */ -#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) | \ - p2m_to_mask(p2m_ram_ro)) - -/* Grant mapping types, which map to a real frame in another VM */ -#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) | \ - p2m_to_mask(p2m_grant_map_ro)) - -/* Foreign mapping types */ -#define P2M_FOREIGN_TYPES (p2m_to_mask(p2m_map_foreign_rw) | \ - p2m_to_mask(p2m_map_foreign_ro)) - -/* Useful predicates */ -#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES) -#define p2m_is_foreign(_t) (p2m_to_mask(_t) & P2M_FOREIGN_TYPES) -#define p2m_is_any_ram(_t) (p2m_to_mask(_t) & \ - (P2M_RAM_TYPES | P2M_GRANT_TYPES | \ - P2M_FOREIGN_TYPES)) - -/* All common type definitions should live ahead of this inclusion. */ -#ifdef _XEN_P2M_COMMON_H -# error "xen/p2m-common.h should not be included directly" -#endif -#include - -static inline bool arch_acquire_resource_check(struct domain *d) -{ - /* - * The reference counting of foreign entries in set_foreign_p2m_entry() - * is supported on Arm. - */ - return true; -} - -static inline -void p2m_altp2m_check(struct vcpu *v, uint16_t idx) -{ - /* Not supported on ARM. */ -} - -/* - * Helper to restrict "p2m_ipa_bits" according to the external entity - * (e.g. IOMMU) requirements. - * - * Each corresponding driver should report the maximum IPA bits - * (Stage-2 input size) it can support. - */ -void p2m_restrict_ipa_bits(unsigned int ipa_bits); - -/* Second stage paging setup, to be called on all CPUs */ -void setup_virt_paging(void); - -/* Initialise the data structures for later use by the p2m code */ -int p2m_init(struct domain *d); -
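A worked example of the type-mask machinery defined earlier in this file (illustrative only): p2m_ram_ro has enum value 2, so p2m_to_mask(p2m_ram_ro) is 1UL << 2, which is part of P2M_RAM_TYPES but not of P2M_FOREIGN_TYPES:

static void example_p2m_type_predicates(void)
{
    ASSERT(p2m_is_ram(p2m_ram_ro));            /* bit set in P2M_RAM_TYPES */
    ASSERT(!p2m_is_foreign(p2m_ram_ro));       /* not a foreign mapping */
    ASSERT(p2m_is_any_ram(p2m_grant_map_rw));  /* grants count as "any ram" */
}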
-/* Return all the p2m resources to Xen. */ -void p2m_teardown(struct domain *d); - -/* - * Remove the mapping refcount on each mapping page in the p2m - * - * TODO: For the moment only foreign mappings are handled - */ -int relinquish_p2m_mapping(struct domain *d); - -/* Context switch */ -void p2m_save_state(struct vcpu *p); -void p2m_restore_state(struct vcpu *n); - -/* Print debugging/statistical info about a domain's p2m */ -void p2m_dump_info(struct domain *d); - -static inline void p2m_write_lock(struct p2m_domain *p2m) -{ - write_lock(&p2m->lock); -} - -void p2m_write_unlock(struct p2m_domain *p2m); - -static inline void p2m_read_lock(struct p2m_domain *p2m) -{ - read_lock(&p2m->lock); -} - -static inline void p2m_read_unlock(struct p2m_domain *p2m) -{ - read_unlock(&p2m->lock); -} - -static inline int p2m_is_locked(struct p2m_domain *p2m) -{ - return rw_is_locked(&p2m->lock); -} - -static inline int p2m_is_write_locked(struct p2m_domain *p2m) -{ - return rw_is_write_locked(&p2m->lock); -} - -void p2m_tlb_flush_sync(struct p2m_domain *p2m); - -/* Look up the MFN corresponding to a domain's GFN. */ -mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t); - -/* - * Get details of a given gfn. - * The P2M lock should be taken by the caller. - */ -mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, - p2m_type_t *t, p2m_access_t *a, - unsigned int *page_order, - bool *valid); - -/* - * Directly set a p2m entry: only for use by the P2M code. - * The P2M write lock should be taken. - */ -int p2m_set_entry(struct p2m_domain *p2m, - gfn_t sgfn, - unsigned long nr, - mfn_t smfn, - p2m_type_t t, - p2m_access_t a); - -bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn); - -void p2m_invalidate_root(struct p2m_domain *p2m); - -/* - * Clean & invalidate caches corresponding to a region [start,end) of guest - * address space. - * - * start will get updated if the function is preempted. - */ -int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end); - -void p2m_set_way_flush(struct vcpu *v, struct cpu_user_regs *regs, - const union hsr hsr); - -void p2m_toggle_cache(struct vcpu *v, bool was_enabled); - -void p2m_flush_vm(struct vcpu *v); - -/* - * Map a region in the guest p2m with a specific p2m type. - * The memory attributes will be derived from the p2m type. - */ -int map_regions_p2mt(struct domain *d, - gfn_t gfn, - unsigned long nr, - mfn_t mfn, - p2m_type_t p2mt); - -int unmap_regions_p2mt(struct domain *d, - gfn_t gfn, - unsigned long nr, - mfn_t mfn); - -int map_dev_mmio_region(struct domain *d, - gfn_t gfn, - unsigned long nr, - mfn_t mfn); - -int p2m_insert_mapping(struct domain *d, gfn_t start_gfn, unsigned long nr, - mfn_t mfn, p2m_type_t t); - -int guest_physmap_add_entry(struct domain *d, - gfn_t gfn, - mfn_t mfn, - unsigned long page_order, - p2m_type_t t); - -/* Untyped version for RAM only, for compatibility */ -static inline int __must_check -guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn, - unsigned int page_order) -{ - return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw); -} - -static inline int guest_physmap_add_pages(struct domain *d, - gfn_t gfn, - mfn_t mfn, - unsigned int nr_pages) -{ - return p2m_insert_mapping(d, gfn, nr_pages, mfn, p2m_ram_rw); -} - -mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn); -
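A usage sketch for the mapping helpers above (assumptions: an existing domain d and caller-chosen gfn/mfn ranges; as the comment on map_regions_p2mt() states, the memory attributes follow from the p2m type):

static int example_map_device(struct domain *d, gfn_t gfn, mfn_t mfn,
                              unsigned long nr)
{
    /* Map nr frames of device MMIO with Device-nGnRE attributes. */
    return map_regions_p2mt(d, gfn, nr, mfn, p2m_mmio_direct_dev);
}

-/* Look up a GFN and take a reference count on the backing page.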
*/ -typedef unsigned int p2m_query_t; -#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */ -#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */ - -struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn, - p2m_type_t *t); - -static inline struct page_info *get_page_from_gfn( - struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) -{ - mfn_t mfn; - p2m_type_t _t; - struct page_info *page; - - /* - * Special case for DOMID_XEN as it is the only domain so far that is - * not auto-translated. - */ - if ( likely(d != dom_xen) ) - return p2m_get_page_from_gfn(d, _gfn(gfn), t); - - if ( !t ) - t = &_t; - - *t = p2m_invalid; - - /* - * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the - * page. - */ - mfn = _mfn(gfn); - page = mfn_to_page(mfn); - - if ( !mfn_valid(mfn) || !get_page(page, d) ) - return NULL; - - if ( page->u.inuse.type_info & PGT_writable_page ) - *t = p2m_ram_rw; - else - *t = p2m_ram_ro; - - return page; -} - -int get_page_type(struct page_info *page, unsigned long type); -bool is_iomem_page(mfn_t mfn); -static inline int get_page_and_type(struct page_info *page, - struct domain *domain, - unsigned long type) -{ - int rc = get_page(page, domain); - - if ( likely(rc) && unlikely(!get_page_type(page, type)) ) - { - put_page(page); - rc = 0; - } - - return rc; -} - -/* get host p2m table */ -#define p2m_get_hostp2m(d) (&(d)->arch.p2m) - -static inline bool p2m_vm_event_sanity_check(struct domain *d) -{ - return true; -} - -/* - * Return the start of the next mapping based on the order of the - * current one. - */ -static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order) -{ - /* - * The order corresponds to the order of the mapping (or invalid - * range) in the page table. So we need to align the GFN before - * incrementing. - */ - gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1)); - - return gfn_add(gfn, 1UL << order); -} - -/* - * A vCPU has cache enabled only when the MMU is enabled and data cache - * is enabled. - */ -static inline bool vcpu_has_cache_enabled(struct vcpu *v) -{ - const register_t mask = SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_M; - - /* Only works with the current vCPU */ - ASSERT(current == v); - - return (READ_SYSREG(SCTLR_EL1) & mask) == mask; -} - -#endif /* _XEN_P2M_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/page-bits.h b/xen/include/asm-arm/page-bits.h deleted file mode 100644 index 5d6477e599..0000000000 --- a/xen/include/asm-arm/page-bits.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ARM_PAGE_SHIFT_H__ -#define __ARM_PAGE_SHIFT_H__ - -#define PAGE_SHIFT 12 - -#ifdef CONFIG_ARM_64 -#define PADDR_BITS 48 -#else -#define PADDR_BITS 40 -#endif - -#endif /* __ARM_PAGE_SHIFT_H__ */ diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h deleted file mode 100644 index c6f9fb0d4e..0000000000 --- a/xen/include/asm-arm/page.h +++ /dev/null @@ -1,293 +0,0 @@ -#ifndef __ARM_PAGE_H__ -#define __ARM_PAGE_H__ - -#include -#include -#include -#include -#include - -/* Shareability values for the LPAE entries */ -#define LPAE_SH_NON_SHAREABLE 0x0 -#define LPAE_SH_UNPREDICTALE 0x1 -#define LPAE_SH_OUTER 0x2 -#define LPAE_SH_INNER 0x3 - -/* - * Attribute Indexes. - * - * These are valid in the AttrIndx[2:0] field of an LPAE stage 1 page - * table entry. They are indexes into the bytes of the MAIR* - * registers, as defined below. 
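 */

To make the indexing scheme just described concrete (a sketch, not part of the patch): attribute index i simply selects byte i of the combined 64-bit MAIR value, so MT_NORMAL (7) selects the top byte, 0xff, i.e. write-back write-allocate. MAIRVAL is defined just below:

static inline uint8_t example_mair_attr(uint64_t mairval, unsigned int attrindx)
{
    /* Eight 8-bit attribute fields, packed little-endian. */
    return (mairval >> (attrindx * 8)) & 0xff;
}

/* example_mair_attr(MAIRVAL, MT_NORMAL) == 0xff */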
-#define MT_DEVICE_nGnRnE 0x0 -#define MT_NORMAL_NC 0x1 -#define MT_NORMAL_WT 0x2 -#define MT_NORMAL_WB 0x3 -#define MT_DEVICE_nGnRE 0x4 -#define MT_NORMAL 0x7 - -/* - * LPAE Memory region attributes. Indexed by the AttrIndex bits of a - * LPAE entry; the 8-bit fields are packed little-endian into MAIR0 and MAIR1. - * - * See section "Device memory" B2.7.2 in ARM DDI 0487B.a for more - * details about the meaning of *G*R*E. - * - * ai encoding - * MT_DEVICE_nGnRnE 000 0000 0000 -- Strongly Ordered/Device nGnRnE - * MT_NORMAL_NC 001 0100 0100 -- Non-Cacheable - * MT_NORMAL_WT 010 1010 1010 -- Write-through - * MT_NORMAL_WB 011 1110 1110 -- Write-back - * MT_DEVICE_nGnRE 100 0000 0100 -- Device nGnRE - * ?? 101 - * reserved 110 - * MT_NORMAL 111 1111 1111 -- Write-back write-allocate - * - * /!\ It is not possible to combine the definitions into MAIRVAL and then - * split them, because that would result in a 64-bit value that some - * assemblers don't understand. - */ -#define _MAIR0(attr, mt) (_AC(attr, ULL) << ((mt) * 8)) -#define _MAIR1(attr, mt) (_AC(attr, ULL) << (((mt) * 8) - 32)) - -#define MAIR0VAL (_MAIR0(0x00, MT_DEVICE_nGnRnE)| \ - _MAIR0(0x44, MT_NORMAL_NC) | \ - _MAIR0(0xaa, MT_NORMAL_WT) | \ - _MAIR0(0xee, MT_NORMAL_WB)) - -#define MAIR1VAL (_MAIR1(0x04, MT_DEVICE_nGnRE) | \ - _MAIR1(0xff, MT_NORMAL)) - -#define MAIRVAL (MAIR1VAL << 32 | MAIR0VAL) - -/* - * Layout of the flags used for updating the hypervisor page tables - * - * [0:2] Memory Attribute Index - * [3:4] Permission flags - * [5] Page present - * [6] Only populate page tables - */ -#define PAGE_AI_MASK(x) ((x) & 0x7U) - -#define _PAGE_XN_BIT 3 -#define _PAGE_RO_BIT 4 -#define _PAGE_XN (1U << _PAGE_XN_BIT) -#define _PAGE_RO (1U << _PAGE_RO_BIT) -#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U) -#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U) - -#define _PAGE_PRESENT (1U << 5) -#define _PAGE_POPULATE (1U << 6) - -/* - * _PAGE_DEVICE and _PAGE_NORMAL are convenience defines. They are not - * meant to be used outside of this header. - */ -#define _PAGE_DEVICE (_PAGE_XN|_PAGE_PRESENT) -#define _PAGE_NORMAL (MT_NORMAL|_PAGE_PRESENT) - -#define PAGE_HYPERVISOR_RO (_PAGE_NORMAL|_PAGE_RO|_PAGE_XN) -#define PAGE_HYPERVISOR_RX (_PAGE_NORMAL|_PAGE_RO) -#define PAGE_HYPERVISOR_RW (_PAGE_NORMAL|_PAGE_XN) - -#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW -#define PAGE_HYPERVISOR_NOCACHE (_PAGE_DEVICE|MT_DEVICE_nGnRE) -#define PAGE_HYPERVISOR_WC (_PAGE_DEVICE|MT_NORMAL_NC) - -/* - * Stage 2 Memory Type. - * - * These are valid in the MemAttr[3:0] field of an LPAE stage 2 page - * table entry. - * - */ -#define MATTR_DEV 0x1 -#define MATTR_MEM_NC 0x5 -#define MATTR_MEM 0xf - -/* Flags for get_page_from_gva, gvirt_to_maddr etc */ -#define GV2M_READ (0u<<0) -#define GV2M_WRITE (1u<<0) -#define GV2M_EXEC (1u<<1) - -#ifndef __ASSEMBLY__ - -#include -#include -#include -#include - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -/* Architectural minimum cacheline size is 4 32-bit words. */ -#define MIN_CACHELINE_BYTES 16 -/* Min dcache line size on the boot CPU. */ -extern size_t dcache_line_bytes; - -#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE) - -static inline size_t read_dcache_line_bytes(void) -{ - register_t ctr; - - /* Read CTR */ - ctr = READ_SYSREG(CTR_EL0); - - /* Bits 16-19 are the log2 number of words in the cacheline. */ - return (size_t) (4 << ((ctr >> 16) & 0xf)); -} -
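Stepping back to the flag layout described at the top of this header, a worked example (illustrative only): PAGE_HYPERVISOR_RO expands to MT_NORMAL | _PAGE_PRESENT | _PAGE_RO | _PAGE_XN, and the accessor macros recover each field:

static void example_page_flags(void)
{
    unsigned int flags = PAGE_HYPERVISOR_RO;   /* normal memory, RO, non-exec */

    ASSERT(PAGE_AI_MASK(flags) == MT_NORMAL);  /* bits [0:2], attribute index */
    ASSERT(PAGE_XN_MASK(flags) == 1);          /* bit 3, execute-never */
    ASSERT(PAGE_RO_MASK(flags) == 1);          /* bit 4, read-only */
    ASSERT(flags & _PAGE_PRESENT);             /* bit 5, mapping present */
}

-/* Functions for flushing medium-sized areas.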
- * if 'range' is large enough we might want to use model-specific - * full-cache flushes. */ - -static inline int invalidate_dcache_va_range(const void *p, unsigned long size) -{ - const void *end = p + size; - size_t cacheline_mask = dcache_line_bytes - 1; - - dsb(sy); /* So the CPU issues all writes to the range */ - - if ( (uintptr_t)p & cacheline_mask ) - { - p = (void *)((uintptr_t)p & ~cacheline_mask); - asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p)); - p += dcache_line_bytes; - } - if ( (uintptr_t)end & cacheline_mask ) - { - end = (void *)((uintptr_t)end & ~cacheline_mask); - asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end)); - } - - for ( ; p < end; p += dcache_line_bytes ) - asm volatile (__invalidate_dcache_one(0) : : "r" (p)); - - dsb(sy); /* So we know the flushes happen before continuing */ - - return 0; -} - -static inline int clean_dcache_va_range(const void *p, unsigned long size) -{ - const void *end = p + size; - dsb(sy); /* So the CPU issues all writes to the range */ - p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1)); - for ( ; p < end; p += dcache_line_bytes ) - asm volatile (__clean_dcache_one(0) : : "r" (p)); - dsb(sy); /* So we know the flushes happen before continuing */ - /* ARM callers assume that dcache_* functions cannot fail. */ - return 0; -} - -static inline int clean_and_invalidate_dcache_va_range - (const void *p, unsigned long size) -{ - const void *end = p + size; - dsb(sy); /* So the CPU issues all writes to the range */ - p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1)); - for ( ; p < end; p += dcache_line_bytes ) - asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p)); - dsb(sy); /* So we know the flushes happen before continuing */ - /* ARM callers assume that dcache_* functions cannot fail. */ - return 0; -} - -/* Macros for flushing a single small item. The predicate is always - * compile-time constant so this will compile down to 3 instructions in - * the common case. */ -#define clean_dcache(x) do { \ - typeof(x) *_p = &(x); \ - if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \ - clean_dcache_va_range(_p, sizeof(x)); \ - else \ - asm volatile ( \ - "dsb sy;" /* Finish all earlier writes */ \ - __clean_dcache_one(0) \ - "dsb sy;" /* Finish flush before continuing */ \ - : : "r" (_p), "m" (*_p)); \ -} while (0) - -#define clean_and_invalidate_dcache(x) do { \ - typeof(x) *_p = &(x); \ - if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \ - clean_and_invalidate_dcache_va_range(_p, sizeof(x)); \ - else \ - asm volatile ( \ - "dsb sy;" /* Finish all earlier writes */ \ - __clean_and_invalidate_dcache_one(0) \ - "dsb sy;" /* Finish flush before continuing */ \ - : : "r" (_p), "m" (*_p)); \ -} while (0) - -/* Flush the dcache for an entire page. */ -void flush_page_to_ram(unsigned long mfn, bool sync_icache); - -/* - * Print a walk of a page table or p2m - * - * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2) - * addr is the PA or IPA to translate - * root_level is the starting level of the page table - * (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0 ) - * nr_root_tables is the number of concatenated tables at the root. - * this can only be != 1 for P2M walks starting at the first or - * subsequent level. - */ -void dump_pt_walk(paddr_t ttbr, paddr_t addr, - unsigned int root_level, - unsigned int nr_root_tables); - -/* Print a walk of the hypervisor's page tables for a virtual addr. 
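 */

A hedged usage sketch for dump_pt_walk() (assumptions: run on a CPU whose hypervisor table base is readable via READ_SYSREG64, a single root table, and starting level 0 as on arm64):

static void example_dump_hyp_mapping(vaddr_t va)
{
    /* Walk the hypervisor page tables for va: one root table, level 0. */
    dump_pt_walk(READ_SYSREG64(TTBR0_EL2), va, 0, 1);
}

-/* dump_hyp_walk(), below, performs this walk for the running CPU.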
*/ -extern void dump_hyp_walk(vaddr_t addr); -/* Print a walk of the p2m for a domain for a physical address. */ -extern void dump_p2m_lookup(struct domain *d, paddr_t addr); - -static inline uint64_t va_to_par(vaddr_t va) -{ - uint64_t par = __va_to_par(va); - /* It is not OK to call this with an invalid VA */ - if ( par & PAR_F ) - { - dump_hyp_walk(va); - panic_PAR(par); - } - return par; -} - -static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags) -{ - uint64_t par = gva_to_ipa_par(va, flags); - if ( par & PAR_F ) - return -EFAULT; - *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK); - return 0; -} - -/* Bits in the PAR returned by va_to_par */ -#define PAR_FAULT 0x1 - -#endif /* __ASSEMBLY__ */ - -#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) - -#endif /* __ARM_PAGE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/paging.h b/xen/include/asm-arm/paging.h deleted file mode 100644 index 6d1a000246..0000000000 --- a/xen/include/asm-arm/paging.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _XEN_PAGING_H -#define _XEN_PAGING_H - -#define paging_mode_translate(d) (1) -#define paging_mode_external(d) (1) - -#endif /* XEN_PAGING_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/pci.h b/xen/include/asm-arm/pci.h deleted file mode 100644 index 9736d6816d..0000000000 --- a/xen/include/asm-arm/pci.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_PCI_H__ -#define __ARM_PCI_H__ - -#ifdef CONFIG_HAS_PCI - -#define pci_to_dev(pcidev) (&(pcidev)->arch.dev) - -extern bool pci_passthrough_enabled; - -/* Arch pci dev struct */ -struct arch_pci_dev { - struct device dev; -}; - -/* Arch-specific MSI data for vPCI. */ -struct vpci_arch_msi { -}; - -/* Arch-specific MSI-X entry data for vPCI. */ -struct vpci_arch_msix_entry { -}; - -/* - * Because of the header cross-dependencies, e.g. we need both - * struct pci_dev and struct arch_pci_dev at the same time, this cannot be - * done with an inline here. Macro can be implemented, but looks scary. - */ -struct pci_dev *dev_to_pci(struct device *dev); - -/* - * struct to hold the mappings of a config space window. This - * is expected to be used as sysdata for PCI controllers that - * use ECAM. - */ -struct pci_config_window { - paddr_t phys_addr; - paddr_t size; - uint8_t busn_start; - uint8_t busn_end; - void __iomem *win; -}; - -/* - * struct to hold pci host bridge information - * for a PCI controller. 
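 */

Before the bridge structure itself, a sketch of the ECAM arithmetic that a pci_config_window describes (the 20/15/12 bit positions are the standard ECAM layout and an assumption here, not spelled out in this header; cfg->busn_start is ignored for brevity):

static void __iomem *example_ecam_addr(const struct pci_config_window *cfg,
                                       pci_sbdf_t sbdf, uint32_t reg)
{
    /* One 4K config page per (bus, device, function). */
    return cfg->win + (((paddr_t)sbdf.bus << 20) | (sbdf.dev << 15) |
                       (sbdf.fn << 12) | reg);
}

-/* struct pci_host_bridge, below, holds this per-controller information: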
- */ -struct pci_host_bridge { - struct dt_device_node *dt_node; /* Pointer to the associated DT node */ - struct list_head node; /* Node in list of host bridges */ - uint16_t segment; /* Segment number */ - struct pci_config_window* cfg; /* Pointer to the bridge config window */ - const struct pci_ops *ops; -}; - -struct pci_ops { - void __iomem *(*map_bus)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, - uint32_t offset); - int (*read)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, - uint32_t reg, uint32_t len, uint32_t *value); - int (*write)(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, - uint32_t reg, uint32_t len, uint32_t value); -}; - -/* - * struct to hold pci ops and bus shift of the config window - * for a PCI controller. - */ -struct pci_ecam_ops { - unsigned int bus_shift; - struct pci_ops pci_ops; - int (*cfg_reg_index)(struct dt_device_node *dev); - int (*init)(struct pci_config_window *); -}; - -/* Default ECAM ops */ -extern const struct pci_ecam_ops pci_generic_ecam_ops; - -int pci_host_common_probe(struct dt_device_node *dev, - const struct pci_ecam_ops *ops); -int pci_generic_config_read(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, - uint32_t reg, uint32_t len, uint32_t *value); -int pci_generic_config_write(struct pci_host_bridge *bridge, pci_sbdf_t sbdf, - uint32_t reg, uint32_t len, uint32_t value); -void __iomem *pci_ecam_map_bus(struct pci_host_bridge *bridge, - pci_sbdf_t sbdf, uint32_t where); -struct pci_host_bridge *pci_find_host_bridge(uint16_t segment, uint8_t bus); -struct dt_device_node *pci_find_host_bridge_node(struct device *dev); -int pci_get_host_bridge_segment(const struct dt_device_node *node, - uint16_t *segment); - -static always_inline bool is_pci_passthrough_enabled(void) -{ - return pci_passthrough_enabled; -} - -void arch_pci_init_pdev(struct pci_dev *pdev); - -#else /*!CONFIG_HAS_PCI*/ - -struct arch_pci_dev { }; - -static always_inline bool is_pci_passthrough_enabled(void) -{ - return false; -} - -struct pci_dev; - -static inline void arch_pci_init_pdev(struct pci_dev *pdev) {} - -static inline int pci_get_host_bridge_segment(const struct dt_device_node *node, - uint16_t *segment) -{ - ASSERT_UNREACHABLE(); - return -EINVAL; -} - -#endif /*!CONFIG_HAS_PCI*/ -#endif /* __ARM_PCI_H__ */ diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h deleted file mode 100644 index f1a8768080..0000000000 --- a/xen/include/asm-arm/percpu.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __ARM_PERCPU_H__ -#define __ARM_PERCPU_H__ - -#ifndef __ASSEMBLY__ - -#include -#include - -extern char __per_cpu_start[], __per_cpu_data_end[]; -extern unsigned long __per_cpu_offset[NR_CPUS]; -void percpu_init_areas(void); - -#define per_cpu(var, cpu) \ - (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) -#define this_cpu(var) \ - (*RELOC_HIDE(&per_cpu__##var, READ_SYSREG(TPIDR_EL2))) - -#define per_cpu_ptr(var, cpu) \ - (*RELOC_HIDE(var, __per_cpu_offset[cpu])) -#define this_cpu_ptr(var) \ - (*RELOC_HIDE(var, READ_SYSREG(TPIDR_EL2))) - -#endif - -#endif /* __ARM_PERCPU_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/perfc.h b/xen/include/asm-arm/perfc.h deleted file mode 100644 index 95c4b2b6b7..0000000000 --- a/xen/include/asm-arm/perfc.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __ASM_PERFC_H__ -#define __ASM_PERFC_H__ - -static inline void arch_perfc_reset(void) -{ -} - -static inline void arch_perfc_gather(void) -{ -} - -#endif - 
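A sketch of how the per-CPU accessors in percpu.h above resolve (illustrative; DECLARE_PER_CPU/DEFINE_PER_CPU come from xen/percpu.h): each CPU keeps its own offset into the per-CPU area in TPIDR_EL2, so this_cpu() is a single relocated access, while per_cpu() indexes __per_cpu_offset[] explicitly:

DEFINE_PER_CPU(unsigned long, example_counter);

static void example_percpu_use(void)
{
    this_cpu(example_counter)++;     /* offset taken from TPIDR_EL2 */

    /* Another CPU's copy goes through __per_cpu_offset[cpu]. */
    printk("CPU0 copy: %lu\n", per_cpu(example_counter, 0));
}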
-/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/perfc_defn.h b/xen/include/asm-arm/perfc_defn.h deleted file mode 100644 index 31f071222b..0000000000 --- a/xen/include/asm-arm/perfc_defn.h +++ /dev/null @@ -1,89 +0,0 @@ -/* This file is legitimately included multiple times. */ -/*#ifndef __XEN_PERFC_DEFN_H__*/ -/*#define __XEN_PERFC_DEFN_H__*/ - -PERFCOUNTER(invalid_hypercalls, "invalid hypercalls") - -PERFCOUNTER(trap_wfi, "trap: wfi") -PERFCOUNTER(trap_wfe, "trap: wfe") -PERFCOUNTER(trap_cp15_32, "trap: cp15 32-bit access") -PERFCOUNTER(trap_cp15_64, "trap: cp15 64-bit access") -PERFCOUNTER(trap_cp14_32, "trap: cp14 32-bit access") -PERFCOUNTER(trap_cp14_64, "trap: cp14 64-bit access") -PERFCOUNTER(trap_cp14_dbg, "trap: cp14 dbg access") -PERFCOUNTER(trap_cp10, "trap: cp10 access") -PERFCOUNTER(trap_cp, "trap: cp access") -PERFCOUNTER(trap_smc32, "trap: 32-bit smc") -PERFCOUNTER(trap_hvc32, "trap: 32-bit hvc") -#ifdef CONFIG_ARM_64 -PERFCOUNTER(trap_smc64, "trap: 64-bit smc") -PERFCOUNTER(trap_hvc64, "trap: 64-bit hvc") -PERFCOUNTER(trap_sysreg, "trap: sysreg access") -#endif -PERFCOUNTER(trap_iabt, "trap: guest instr abort") -PERFCOUNTER(trap_dabt, "trap: guest data abort") -PERFCOUNTER(trap_uncond, "trap: condition failed") - -PERFCOUNTER(vpsci_cpu_on, "vpsci: cpu_on") -PERFCOUNTER(vpsci_cpu_off, "vpsci: cpu_off") -PERFCOUNTER(vpsci_version, "vpsci: version") -PERFCOUNTER(vpsci_migrate_info_type, "vpsci: migrate_info_type") -PERFCOUNTER(vpsci_system_off, "vpsci: system_off") -PERFCOUNTER(vpsci_system_reset, "vpsci: system_reset") -PERFCOUNTER(vpsci_cpu_suspend, "vpsci: cpu_suspend") -PERFCOUNTER(vpsci_cpu_affinity_info, "vpsci: cpu_affinity_info") -PERFCOUNTER(vpsci_features, "vpsci: features") - -PERFCOUNTER(vcpu_kick, "vcpu: notify other vcpu") - -PERFCOUNTER(vgicd_reads, "vgicd: read") -PERFCOUNTER(vgicd_writes, "vgicd: write") -PERFCOUNTER(vgicr_reads, "vgicr: read") -PERFCOUNTER(vgicr_writes, "vgicr: write") -PERFCOUNTER(vgic_cp64_reads, "vgic: cp64 read") -PERFCOUNTER(vgic_cp64_writes, "vgic: cp64 write") -PERFCOUNTER(vgic_sysreg_reads, "vgic: sysreg read") -PERFCOUNTER(vgic_sysreg_writes, "vgic: sysreg write") -PERFCOUNTER(vgic_sgi_list , "vgic: SGI send to list") -PERFCOUNTER(vgic_sgi_others, "vgic: SGI send to others") -PERFCOUNTER(vgic_sgi_self, "vgic: SGI send to self") -PERFCOUNTER(vgic_irq_migrates, "vgic: irq migration") - -PERFCOUNTER(vuart_reads, "vuart: read") -PERFCOUNTER(vuart_writes, "vuart: write") - -PERFCOUNTER(vtimer_cp32_reads, "vtimer: cp32 read") -PERFCOUNTER(vtimer_cp32_writes, "vtimer: cp32 write") - -PERFCOUNTER(vtimer_cp64_reads, "vtimer: cp64 read") -PERFCOUNTER(vtimer_cp64_writes, "vtimer: cp64 write") - -PERFCOUNTER(vtimer_sysreg_reads, "vtimer: sysreg read") -PERFCOUNTER(vtimer_sysreg_writes, "vtimer: sysreg write") - -PERFCOUNTER(vtimer_phys_inject, "vtimer: phys expired, injected") -PERFCOUNTER(vtimer_phys_masked, "vtimer: phys expired, masked") -PERFCOUNTER(vtimer_virt_inject, "vtimer: virt expired, injected") - -PERFCOUNTER(ppis, "#PPIs") -PERFCOUNTER(spis, "#SPIs") -PERFCOUNTER(guest_irqs, "#GUEST-IRQS") - -PERFCOUNTER(hyp_timer_irqs, "Hypervisor timer interrupts") -PERFCOUNTER(phys_timer_irqs, "Physical timer interrupts") -PERFCOUNTER(virt_timer_irqs, "Virtual timer interrupts") -PERFCOUNTER(maintenance_irqs, "Maintenance interrupts") - -PERFCOUNTER(atomics_guest, "atomics: guest access") -PERFCOUNTER(atomics_guest_paused, "atomics: guest 
paused") - -/*#endif*/ /* __XEN_PERFC_DEFN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/pl011-uart.h b/xen/include/asm-arm/pl011-uart.h deleted file mode 100644 index 57e9ec73ac..0000000000 --- a/xen/include/asm-arm/pl011-uart.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * xen/include/asm-arm/pl011-uart.h - * - * Common constant definition between early printk and the UART driver - * for the pl011 UART - * - * Tim Deegan - * Copyright (c) 2011 Citrix Systems. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_PL011_H -#define __ASM_ARM_PL011_H - -/* PL011 register addresses */ -#define DR (0x00) -#define RSR (0x04) -#define FR (0x18) -#define ILPR (0x20) -#define IBRD (0x24) -#define FBRD (0x28) -#define LCR_H (0x2c) -#define CR (0x30) -#define IFLS (0x34) -#define IMSC (0x38) -#define RIS (0x3c) -#define MIS (0x40) -#define ICR (0x44) -#define DMACR (0x48) - -/* CR bits */ -#define CTSEN (1<<15) /* automatic CTS hardware flow control */ -#define RTSEN (1<<14) /* automatic RTS hardware flow control */ -#define RTS (1<<11) /* RTS signal */ -#define DTR (1<<10) /* DTR signal */ -#define RXE (1<<9) /* Receive enable */ -#define TXE (1<<8) /* Transmit enable */ -#define UARTEN (1<<0) /* UART enable */ - -/* FR bits */ -#define TXFE (1<<7) /* TX FIFO empty */ -#define RXFE (1<<4) /* RX FIFO empty */ -#define TXFF (1<<5) /* TX FIFO full */ -#define RXFF (1<<6) /* RX FIFO full */ -#define BUSY (1<<3) /* Transmit is not complete */ - -/* LCR_H bits */ -#define SPS (1<<7) /* Stick parity select */ -#define FEN (1<<4) /* FIFO enable */ -#define STP2 (1<<3) /* Two stop bits select */ -#define EPS (1<<2) /* Even parity select */ -#define PEN (1<<1) /* Parity enable */ -#define BRK (1<<0) /* Send break */ - -/* Interrupt bits (IMSC, MIS, ICR) */ -#define OEI (1<<10) /* Overrun Error interrupt mask */ -#define BEI (1<<9) /* Break Error interrupt mask */ -#define PEI (1<<8) /* Parity Error interrupt mask */ -#define FEI (1<<7) /* Framing Error interrupt mask */ -#define RTI (1<<6) /* Receive Timeout interrupt mask */ -#define TXI (1<<5) /* Transmit interrupt mask */ -#define RXI (1<<4) /* Receive interrupt mask */ -#define DSRMI (1<<3) /* nUARTDSR Modem interrupt mask */ -#define DCDMI (1<<2) /* nUARTDCD Modem interrupt mask */ -#define CTSMI (1<<1) /* nUARTCTS Modem interrupt mask */ -#define RIMI (1<<0) /* nUARTRI Modem interrupt mask */ -#define ALLI OEI|BEI|PEI|FEI|RTI|TXI|RXI|DSRMI|DCDMI|CTSMI|RIMI - -#endif /* __ASM_ARM_PL011_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platform.h b/xen/include/asm-arm/platform.h deleted file mode 100644 index 997eb25216..0000000000 --- a/xen/include/asm-arm/platform.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef __ASM_ARM_PLATFORM_H -#define __ASM_ARM_PLATFORM_H - -#include -#include -#include - -/* Describe specific operation for a board */ -struct platform_desc 
{ - /* Platform name */ - const char *name; - /* Array of device tree 'compatible' strings */ - const char *const *compatible; - /* Platform initialization */ - int (*init)(void); - int (*init_time)(void); -#ifdef CONFIG_ARM_32 - /* SMP */ - int (*smp_init)(void); - int (*cpu_up)(int cpu); -#endif - /* Specific mapping for dom0 */ - int (*specific_mapping)(struct domain *d); - /* Platform reset */ - void (*reset)(void); - /* Platform power-off */ - void (*poweroff)(void); - /* Platform specific SMC handler */ - bool (*smc)(struct cpu_user_regs *regs); - /* - * Platform quirks - * Defined as a function because a platform can support multiple - * boards, each with different quirks - */ - uint32_t (*quirks)(void); - /* - * Platform blacklist devices - * List of devices which must not be passed through to a guest - */ - const struct dt_device_match *blacklist_dev; - /* Override the DMA width (32-bit by default). */ - unsigned int dma_bitsize; -}; - -/* - * Quirk for platforms where device tree incorrectly reports 4K GICC - * size, but actually the two GICC register ranges are placed at 64K - * stride. - */ -#define PLATFORM_QUIRK_GIC_64K_STRIDE (1 << 0) - -void platform_init(void); -int platform_init_time(void); -int platform_specific_mapping(struct domain *d); -#ifdef CONFIG_ARM_32 -int platform_smp_init(void); -int platform_cpu_up(int cpu); -#endif -void platform_reset(void); -void platform_poweroff(void); -bool platform_smc(struct cpu_user_regs *regs); -bool platform_has_quirk(uint32_t quirk); -bool platform_device_is_blacklisted(const struct dt_device_node *node); - -#define PLATFORM_START(_name, _namestr) \ -static const struct platform_desc __plat_desc_##_name __used \ -__section(".arch.info") = { \ - .name = _namestr, - -#define PLATFORM_END \ -}; - -#endif /* __ASM_ARM_PLATFORM_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platforms/exynos5.h b/xen/include/asm-arm/platforms/exynos5.h deleted file mode 100644 index aef5c67084..0000000000 --- a/xen/include/asm-arm/platforms/exynos5.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_ARM_PLATFORMS_EXYNOS5_H -#define __ASM_ARM_PLATFORMS_EXYNOS5_H - -#define EXYNOS5_MCT_G_TCON 0x240 /* Relative to MCT_BASE */ -#define EXYNOS5_MCT_G_TCON_START (1 << 8) - -#define EXYNOS5_PA_CHIPID 0x10000000 -#define EXYNOS5_PA_TIMER 0x12dd0000 - -#define EXYNOS5_SWRESET 0x0400 /* Relative to PA_PMU */ - -#endif /* __ASM_ARM_PLATFORMS_EXYNOS5_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platforms/midway.h b/xen/include/asm-arm/platforms/midway.h deleted file mode 100644 index 099e4350f9..0000000000 --- a/xen/include/asm-arm/platforms/midway.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __ASM_ARM_PLATFORMS_MIDWAY_H -#define __ASM_ASM_PLATFORMS_MIDWAY_H - -/* addresses of SREG registers for resetting the SoC */ -#define MW_SREG_PWR_REQ 0xfff3cf00 -#define MW_SREG_A15_PWR_CTRL 0xfff3c200 - -#define MW_PWR_SUSPEND 0 -#define MW_PWR_SOFT_RESET 1 -#define MW_PWR_HARD_RESET 2 -#define MW_PWR_SHUTDOWN 3 - -#endif /* __ASM_ARM_PLATFORMS_MIDWAY_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platforms/omap5.h b/xen/include/asm-arm/platforms/omap5.h deleted file mode 100644 index c559c84b61..0000000000 --- 
a/xen/include/asm-arm/platforms/omap5.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __ASM_ARM_PLATFORMS_OMAP5_H -#define __ASM_ASM_PLATFORMS_OMAP5_H - -#define REALTIME_COUNTER_BASE 0x48243200 -#define INCREMENTER_NUMERATOR_OFFSET 0x10 -#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET 0x14 -#define NUMERATOR_DENUMERATOR_MASK 0xfffff000 -#define PRM_FRAC_INCREMENTER_DENUMERATOR_RELOAD 0x00010000 - -#define OMAP5_L4_WKUP 0x4AE00000 -#define OMAP5_PRM_BASE (OMAP5_L4_WKUP + 0x6000) -#define OMAP5_CKGEN_PRM_BASE (OMAP5_PRM_BASE + 0x100) -#define OMAP5_CM_CLKSEL_SYS 0x10 -#define SYS_CLKSEL_MASK 0xfffffff8 - -#define OMAP5_PRCM_MPU_BASE 0x48243000 -#define OMAP5_WKUPGEN_BASE 0x48281000 -#define OMAP5_SRAM_PA 0x40300000 - -#define OMAP_AUX_CORE_BOOT_0_OFFSET 0x800 -#define OMAP_AUX_CORE_BOOT_1_OFFSET 0x804 - -#endif /* __ASM_ARM_PLATFORMS_OMAP5_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platforms/vexpress.h b/xen/include/asm-arm/platforms/vexpress.h deleted file mode 100644 index 8b45d3a850..0000000000 --- a/xen/include/asm-arm/platforms/vexpress.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef __ASM_ARM_PLATFORMS_VEXPRESS_H -#define __ASM_ARM_PLATFORMS_VEXPRESS_H - -/* V2M */ -#define V2M_SYS_MMIO_BASE (0x1c010000) -#define V2M_SYS_FLAGSSET (0x30) -#define V2M_SYS_FLAGSCLR (0x34) - -#define V2M_SYS_CFGDATA (0x00A0) -#define V2M_SYS_CFGCTRL (0x00A4) -#define V2M_SYS_CFGSTAT (0x00A8) - -#define V2M_SYS_CFG_START (1<<31) -#define V2M_SYS_CFG_WRITE (1<<30) -#define V2M_SYS_CFG_ERROR (1<<1) -#define V2M_SYS_CFG_COMPLETE (1<<0) - -#define V2M_SYS_CFG_OSC_FUNC 1 -#define V2M_SYS_CFG_OSC0 0 -#define V2M_SYS_CFG_OSC1 1 -#define V2M_SYS_CFG_OSC2 2 -#define V2M_SYS_CFG_OSC3 3 -#define V2M_SYS_CFG_OSC4 4 -#define V2M_SYS_CFG_OSC5 5 - -/* Board-specific: base address of system controller */ -#define SP810_ADDRESS 0x1C020000 - -#endif /* __ASM_ARM_PLATFORMS_VEXPRESS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/platforms/xilinx-zynqmp-eemi.h b/xen/include/asm-arm/platforms/xilinx-zynqmp-eemi.h deleted file mode 100644 index cf25a9014d..0000000000 --- a/xen/include/asm-arm/platforms/xilinx-zynqmp-eemi.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2018 Xilinx Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __ASM_ARM_PLATFORMS_ZYNQMP_H -#define __ASM_ARM_PLATFORMS_ZYNQMP_H - -#include -#include - -#define EEMI_FID(fid) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_64, \ - ARM_SMCCC_OWNER_SIP, \ - fid) - -enum pm_api_id { - /* Miscellaneous API functions: */ - PM_GET_API_VERSION = 1, /* Do not change or move */ - PM_SET_CONFIGURATION, - PM_GET_NODE_STATUS, - PM_GET_OP_CHARACTERISTIC, - PM_REGISTER_NOTIFIER, - /* API for suspending of PUs: */ - PM_REQ_SUSPEND, - PM_SELF_SUSPEND, - PM_FORCE_POWERDOWN, - PM_ABORT_SUSPEND, - PM_REQ_WAKEUP, - PM_SET_WAKEUP_SOURCE, - PM_SYSTEM_SHUTDOWN, - /* API for managing PM slaves: */ - PM_REQ_NODE, - PM_RELEASE_NODE, - PM_SET_REQUIREMENT, - PM_SET_MAX_LATENCY, - /* Direct control API functions: */ - PM_RESET_ASSERT, - PM_RESET_GET_STATUS, - PM_MMIO_WRITE, - PM_MMIO_READ, - PM_INIT, - PM_FPGA_LOAD, - PM_FPGA_GET_STATUS, - PM_GET_CHIPID, - /* ID 25 is used by U-Boot to process secure boot images */ - /* Secure library generic API functions */ - PM_SECURE_SHA = 26, - PM_SECURE_RSA, - /* Pin control API functions */ - PM_PINCTRL_REQUEST, - PM_PINCTRL_RELEASE, - PM_PINCTRL_GET_FUNCTION, - PM_PINCTRL_SET_FUNCTION, - PM_PINCTRL_CONFIG_PARAM_GET, - PM_PINCTRL_CONFIG_PARAM_SET, - /* PM IOCTL API */ - PM_IOCTL, - /* API to query information from firmware */ - PM_QUERY_DATA, - /* Clock control API functions */ - PM_CLOCK_ENABLE, - PM_CLOCK_DISABLE, - PM_CLOCK_GETSTATE, - PM_CLOCK_SETDIVIDER, - PM_CLOCK_GETDIVIDER, - PM_CLOCK_SETRATE, - PM_CLOCK_GETRATE, - PM_CLOCK_SETPARENT, - PM_CLOCK_GETPARENT, - PM_GET_TRUSTZONE_VERSION = 2563, - PM_API_MAX -}; - -/** - * @XST_PM_SUCCESS: Success - * @XST_PM_INTERNAL: Unexpected error - * @XST_PM_CONFLICT: Conflicting requirements - * @XST_PM_NO_ACCESS: Access rights violation - * @XST_PM_INVALID_NODE: Does not apply to node passed as argument - * @XST_PM_DOUBLE_REQ: Duplicate request - * @XST_PM_ABORT_SUSPEND: Target has aborted suspend - */ -enum pm_ret_status { - XST_PM_SUCCESS = 0, - XST_PM_INTERNAL = 2000, - XST_PM_CONFLICT, - XST_PM_NO_ACCESS, - XST_PM_INVALID_NODE, - XST_PM_DOUBLE_REQ, - XST_PM_ABORT_SUSPEND, -}; - -/* IPI SMC function numbers enum definition and fids */ -#define IPI_MAILBOX_FID(fid) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_SIP, \ - fid) -enum ipi_api_id { - IPI_MAILBOX_OPEN = 0x1000, - IPI_MAILBOX_RELEASE, - IPI_MAILBOX_STATUS_ENQUIRY, - IPI_MAILBOX_NOTIFY, - IPI_MAILBOX_ACK, - IPI_MAILBOX_ENABLE_IRQ, - IPI_MAILBOX_DISABLE_IRQ, -}; - -extern bool zynqmp_eemi(struct cpu_user_regs *regs); - -#endif /* __ASM_ARM_PLATFORMS_ZYNQMP_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h deleted file mode 100644 index 8ab2940f68..0000000000 --- a/xen/include/asm-arm/processor.h +++ /dev/null @@ -1,598 +0,0 @@ -#ifndef __ASM_ARM_PROCESSOR_H -#define __ASM_ARM_PROCESSOR_H - -#ifndef __ASSEMBLY__ -#include -#endif -#include - -/* CTR Cache Type Register */ -#define CTR_L1IP_MASK 0x3 -#define CTR_L1IP_SHIFT 14 -#define CTR_DMINLINE_SHIFT 16 -#define CTR_IMINLINE_SHIFT 0 -#define CTR_IMINLINE_MASK 0xf -#define CTR_ERG_SHIFT 20 -#define CTR_CWG_SHIFT 24 -#define CTR_CWG_MASK 15 -#define CTR_IDC_SHIFT 28 -#define CTR_DIC_SHIFT 29 - -#define ICACHE_POLICY_VPIPT 0 -#define ICACHE_POLICY_AIVIVT 1 -#define ICACHE_POLICY_VIPT 2 -#define ICACHE_POLICY_PIPT 3 - -/* MIDR Main ID Register */ -#define 
MIDR_REVISION_MASK 0xf -#define MIDR_RESIVION(midr) ((midr) & MIDR_REVISION_MASK) -#define MIDR_PARTNUM_SHIFT 4 -#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT) -#define MIDR_PARTNUM(midr) \ - (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT) -#define MIDR_ARCHITECTURE_SHIFT 16 -#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT) -#define MIDR_ARCHITECTURE(midr) \ - (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT) -#define MIDR_VARIANT_SHIFT 20 -#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT) -#define MIDR_VARIANT(midr) \ - (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) -#define MIDR_IMPLEMENTOR_SHIFT 24 -#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT) -#define MIDR_IMPLEMENTOR(midr) \ - (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) - -#define MIDR_CPU_MODEL(imp, partnum) \ - (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ - (0xf << MIDR_ARCHITECTURE_SHIFT) | \ - ((partnum) << MIDR_PARTNUM_SHIFT)) - -#define MIDR_CPU_MODEL_MASK \ - (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | MIDR_ARCHITECTURE_MASK) - -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ -({ \ - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ - u32 _rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ - \ - _model == (model) && _rv >= (rv_min) && _rv <= (rv_max); \ -}) - -#define ARM_CPU_IMP_ARM 0x41 - -#define ARM_CPU_PART_CORTEX_A12 0xC0D -#define ARM_CPU_PART_CORTEX_A17 0xC0E -#define ARM_CPU_PART_CORTEX_A15 0xC0F -#define ARM_CPU_PART_CORTEX_A53 0xD03 -#define ARM_CPU_PART_CORTEX_A55 0xD05 -#define ARM_CPU_PART_CORTEX_A57 0xD07 -#define ARM_CPU_PART_CORTEX_A72 0xD08 -#define ARM_CPU_PART_CORTEX_A73 0xD09 -#define ARM_CPU_PART_CORTEX_A75 0xD0A -#define ARM_CPU_PART_CORTEX_A76 0xD0B -#define ARM_CPU_PART_NEOVERSE_N1 0xD0C - -#define MIDR_CORTEX_A12 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A12) -#define MIDR_CORTEX_A17 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A17) -#define MIDR_CORTEX_A15 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A15) -#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) -#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) -#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) -#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) -#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) -#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) -#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) -#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) - -/* MPIDR Multiprocessor Affinity Register */ -#define _MPIDR_UP (30) -#define MPIDR_UP (_AC(1,UL) << _MPIDR_UP) -#define _MPIDR_SMP (31) -#define MPIDR_SMP (_AC(1,UL) << _MPIDR_SMP) -#define MPIDR_AFF0_SHIFT (0) -#define MPIDR_AFF0_MASK (_AC(0xff,UL) << MPIDR_AFF0_SHIFT) -#ifdef CONFIG_ARM_64 -#define MPIDR_HWID_MASK _AC(0xff00ffffff,UL) -#else -#define MPIDR_HWID_MASK _AC(0xffffff,U) -#endif -#define MPIDR_INVALID (~MPIDR_HWID_MASK) -#define MPIDR_LEVEL_BITS (8) - - -/* - * Macros to extract affinity level. 
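Picked from the Linux kernel. - */

A worked example using the macros defined just below (illustrative only): for an MPIDR value with Aff2 = 1, Aff1 = 0, Aff0 = 3, the level shifts are 0, 8 and 16 bits respectively:

static void example_mpidr_affinity(void)
{
    register_t mpidr = (1UL << 16) | 3;          /* Aff2 = 1, Aff1 = 0, Aff0 = 3 */

    ASSERT(MPIDR_AFFINITY_LEVEL(mpidr, 0) == 3);
    ASSERT(MPIDR_AFFINITY_LEVEL(mpidr, 1) == 0);
    ASSERT(MPIDR_AFFINITY_LEVEL(mpidr, 2) == 1);
}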
- - -/* - * Macros to extract affinity level, picked from the Linux kernel. - */ - -#define MPIDR_LEVEL_BITS_SHIFT 3 -#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) - -#define MPIDR_LEVEL_SHIFT(level) \ - (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT) - -#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ - ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) - -#define AFFINITY_MASK(level) ~((_AC(0x1,UL) << MPIDR_LEVEL_SHIFT(level)) - 1) - -/* TTBCR Translation Table Base Control Register */ -#define TTBCR_EAE _AC(0x80000000,U) -#define TTBCR_N_MASK _AC(0x07,U) -#define TTBCR_N_16KB _AC(0x00,U) -#define TTBCR_N_8KB _AC(0x01,U) -#define TTBCR_N_4KB _AC(0x02,U) -#define TTBCR_N_2KB _AC(0x03,U) -#define TTBCR_N_1KB _AC(0x04,U) - -/* - * TTBCR_PD(0|1) can be applied only if LPAE is disabled, i.e., TTBCR.EAE==0 - * (ARM DDI 0487B.a G6-5203 and ARM DDI 0406C.b B4-1722). - */ -#define TTBCR_PD0 (_AC(1,U)<<4) -#define TTBCR_PD1 (_AC(1,U)<<5) - -/* SCTLR System Control Register. */ - -/* Bits specific to SCTLR_EL1 for Arm32 */ - -#define SCTLR_A32_EL1_V BIT(13, UL) - -/* Common bits for SCTLR_ELx for Arm32 */ - -#define SCTLR_A32_ELx_TE BIT(30, UL) -#define SCTLR_A32_ELx_FI BIT(21, UL) - -/* Common bits for SCTLR_ELx for Arm64 */ -#define SCTLR_A64_ELx_SA BIT(3, UL) - -/* Common bits for SCTLR_ELx on all architectures */ -#define SCTLR_Axx_ELx_EE BIT(25, UL) -#define SCTLR_Axx_ELx_WXN BIT(19, UL) -#define SCTLR_Axx_ELx_I BIT(12, UL) -#define SCTLR_Axx_ELx_C BIT(2, UL) -#define SCTLR_Axx_ELx_A BIT(1, UL) -#define SCTLR_Axx_ELx_M BIT(0, UL) - -#ifdef CONFIG_ARM_32 - -#define HSCTLR_RES1 (BIT( 3, UL) | BIT( 4, UL) | BIT( 5, UL) |\ - BIT( 6, UL) | BIT(11, UL) | BIT(16, UL) |\ - BIT(18, UL) | BIT(22, UL) | BIT(23, UL) |\ - BIT(28, UL) | BIT(29, UL)) - -#define HSCTLR_RES0 (BIT(7, UL) | BIT(8, UL) | BIT(9, UL) | BIT(10, UL) |\ - BIT(13, UL) | BIT(14, UL) | BIT(15, UL) | BIT(17, UL) |\ - BIT(20, UL) | BIT(24, UL) | BIT(26, UL) | BIT(27, UL) |\ - BIT(31, UL)) - -/* Initial value for HSCTLR */ -#define HSCTLR_SET (HSCTLR_RES1 | SCTLR_Axx_ELx_A | SCTLR_Axx_ELx_I) - -/* Only used at pre-processing time... */ -#define HSCTLR_CLEAR (HSCTLR_RES0 | SCTLR_Axx_ELx_M |\ - SCTLR_Axx_ELx_C | SCTLR_Axx_ELx_WXN |\ - SCTLR_A32_ELx_FI | SCTLR_Axx_ELx_EE |\ - SCTLR_A32_ELx_TE) - -#if (HSCTLR_SET ^ HSCTLR_CLEAR) != 0xffffffffU -#error "Inconsistent HSCTLR set/clear bits" -#endif - -#else - -#define SCTLR_EL2_RES1 (BIT( 4, UL) | BIT( 5, UL) | BIT(11, UL) |\ - BIT(16, UL) | BIT(18, UL) | BIT(22, UL) |\ - BIT(23, UL) | BIT(28, UL) | BIT(29, UL)) - -#define SCTLR_EL2_RES0 (BIT( 6, UL) | BIT( 7, UL) | BIT( 8, UL) |\ - BIT( 9, UL) | BIT(10, UL) | BIT(13, UL) |\ - BIT(14, UL) | BIT(15, UL) | BIT(17, UL) |\ - BIT(20, UL) | BIT(21, UL) | BIT(24, UL) |\ - BIT(26, UL) | BIT(27, UL) | BIT(30, UL) |\ - BIT(31, UL) | (0xffffffffULL << 32)) - -/* Initial value for SCTLR_EL2 */ -#define SCTLR_EL2_SET (SCTLR_EL2_RES1 | SCTLR_A64_ELx_SA |\ - SCTLR_Axx_ELx_I) - -/* Only used at pre-processing time... 
*/ -#define SCTLR_EL2_CLEAR (SCTLR_EL2_RES0 | SCTLR_Axx_ELx_M |\ - SCTLR_Axx_ELx_A | SCTLR_Axx_ELx_C |\ - SCTLR_Axx_ELx_WXN | SCTLR_Axx_ELx_EE) - -#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL -#error "Inconsistent SCTLR_EL2 set/clear bits" -#endif - -#endif - -/* HCR Hyp Configuration Register */ -#define HCR_RW (_AC(1,UL)<<31) /* Register Width, ARM64 only */ -#define HCR_TGE (_AC(1,UL)<<27) /* Trap General Exceptions */ -#define HCR_TVM (_AC(1,UL)<<26) /* Trap Virtual Memory Controls */ -#define HCR_TTLB (_AC(1,UL)<<25) /* Trap TLB Maintenance Operations */ -#define HCR_TPU (_AC(1,UL)<<24) /* Trap Cache Maintenance Operations to PoU */ -#define HCR_TPC (_AC(1,UL)<<23) /* Trap Cache Maintenance Operations to PoC */ -#define HCR_TSW (_AC(1,UL)<<22) /* Trap Set/Way Cache Maintenance Operations */ -#define HCR_TAC (_AC(1,UL)<<21) /* Trap ACTLR Accesses */ -#define HCR_TIDCP (_AC(1,UL)<<20) /* Trap lockdown */ -#define HCR_TSC (_AC(1,UL)<<19) /* Trap SMC instruction */ -#define HCR_TID3 (_AC(1,UL)<<18) /* Trap ID Register Group 3 */ -#define HCR_TID2 (_AC(1,UL)<<17) /* Trap ID Register Group 2 */ -#define HCR_TID1 (_AC(1,UL)<<16) /* Trap ID Register Group 1 */ -#define HCR_TID0 (_AC(1,UL)<<15) /* Trap ID Register Group 0 */ -#define HCR_TWE (_AC(1,UL)<<14) /* Trap WFE instruction */ -#define HCR_TWI (_AC(1,UL)<<13) /* Trap WFI instruction */ -#define HCR_DC (_AC(1,UL)<<12) /* Default cacheable */ -#define HCR_BSU_MASK (_AC(3,UL)<<10) /* Barrier Shareability Upgrade */ -#define HCR_BSU_NONE (_AC(0,UL)<<10) -#define HCR_BSU_INNER (_AC(1,UL)<<10) -#define HCR_BSU_OUTER (_AC(2,UL)<<10) -#define HCR_BSU_FULL (_AC(3,UL)<<10) -#define HCR_FB (_AC(1,UL)<<9) /* Force Broadcast of Cache/BP/TLB operations */ -#define HCR_VA (_AC(1,UL)<<8) /* Virtual Asynchronous Abort */ -#define HCR_VI (_AC(1,UL)<<7) /* Virtual IRQ */ -#define HCR_VF (_AC(1,UL)<<6) /* Virtual FIQ */ -#define HCR_AMO (_AC(1,UL)<<5) /* Override CPSR.A */ -#define HCR_IMO (_AC(1,UL)<<4) /* Override CPSR.I */ -#define HCR_FMO (_AC(1,UL)<<3) /* Override CPSR.F */ -#define HCR_PTW (_AC(1,UL)<<2) /* Protected Walk */ -#define HCR_SWIO (_AC(1,UL)<<1) /* Set/Way Invalidation Override */ -#define HCR_VM (_AC(1,UL)<<0) /* Virtual MMU Enable */ - -/* TCR: Stage 1 Translation Control */ - -#define TCR_T0SZ_SHIFT (0) -#define TCR_T1SZ_SHIFT (16) -#define TCR_T0SZ(x) ((x)< */ - -/* HDCR Hyp. 
Debug Configuration Register */ -#define HDCR_TDRA (_AC(1,U)<<11) /* Trap Debug ROM access */ -#define HDCR_TDOSA (_AC(1,U)<<10) /* Trap Debug-OS-related register access */ -#define HDCR_TDA (_AC(1,U)<<9) /* Trap Debug Access */ -#define HDCR_TDE (_AC(1,U)<<8) /* Route Soft Debug exceptions from EL1/EL1 to EL2 */ -#define HDCR_TPM (_AC(1,U)<<6) /* Trap Performance Monitors accesses */ -#define HDCR_TPMCR (_AC(1,U)<<5) /* Trap PMCR accesses */ - -#define HSR_EC_SHIFT 26 - -#define HSR_EC_UNKNOWN 0x00 -#define HSR_EC_WFI_WFE 0x01 -#define HSR_EC_CP15_32 0x03 -#define HSR_EC_CP15_64 0x04 -#define HSR_EC_CP14_32 0x05 /* Trapped MCR or MRC access to CP14 */ -#define HSR_EC_CP14_DBG 0x06 /* Trapped LDC/STC access to CP14 (only for debug registers) */ -#define HSR_EC_CP 0x07 /* HCPTR-trapped access to CP0-CP13 */ -#define HSR_EC_CP10 0x08 -#define HSR_EC_JAZELLE 0x09 -#define HSR_EC_BXJ 0x0a -#define HSR_EC_CP14_64 0x0c -#define HSR_EC_SVC32 0x11 -#define HSR_EC_HVC32 0x12 -#define HSR_EC_SMC32 0x13 -#ifdef CONFIG_ARM_64 -#define HSR_EC_SVC64 0x15 -#define HSR_EC_HVC64 0x16 -#define HSR_EC_SMC64 0x17 -#define HSR_EC_SYSREG 0x18 -#endif -#define HSR_EC_INSTR_ABORT_LOWER_EL 0x20 -#define HSR_EC_INSTR_ABORT_CURR_EL 0x21 -#define HSR_EC_DATA_ABORT_LOWER_EL 0x24 -#define HSR_EC_DATA_ABORT_CURR_EL 0x25 -#ifdef CONFIG_ARM_64 -#define HSR_EC_BRK 0x3c -#endif - -/* FSR format, common */ -#define FSR_LPAE (_AC(1,UL)<<9) -/* FSR short format */ -#define FSRS_FS_DEBUG (_AC(0,UL)<<10|_AC(0x2,UL)<<0) -/* FSR long format */ -#define FSRL_STATUS_DEBUG (_AC(0x22,UL)<<0) - -#ifdef CONFIG_ARM_64 -#define MM64_VMID_8_BITS_SUPPORT 0x0 -#define MM64_VMID_16_BITS_SUPPORT 0x2 -#endif - -#ifndef __ASSEMBLY__ - -extern register_t __cpu_logical_map[]; -#define cpu_logical_map(cpu) __cpu_logical_map[cpu] - -#endif - -/* Physical Address Register */ -#define PAR_F (_AC(1,U)<<0) - -/* .... If F == 1 */ -#define PAR_FSC_SHIFT (1) -#define PAR_FSC_MASK (_AC(0x3f,U)< -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -#ifndef __ASSEMBLY__ -void panic_PAR(uint64_t par); - -void show_execution_state(const struct cpu_user_regs *regs); -void show_registers(const struct cpu_user_regs *regs); -//#define dump_execution_state() run_in_exception_handler(show_execution_state) -#define dump_execution_state() WARN() - -#define cpu_relax() barrier() /* Could yield? */ - -/* All a bit UP for the moment */ -#define cpu_to_core(_cpu) (0) -#define cpu_to_socket(_cpu) (0) - -struct vcpu; -void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, - struct vcpu_guest_core_regs *regs); -void vcpu_regs_user_to_hyp(struct vcpu *vcpu, - const struct vcpu_guest_core_regs *regs); - -void do_trap_hyp_serror(struct cpu_user_regs *regs); - -void do_trap_guest_serror(struct cpu_user_regs *regs); - -register_t get_default_hcr_flags(void); - -/* - * Synchronize SError unless the feature is selected. - * This is relying on the SErrors are currently unmasked. - */ -#define SYNCHRONIZE_SERROR(feat) \ - do { \ - ASSERT(local_abort_is_enabled()); \ - asm volatile(ALTERNATIVE("dsb sy; isb", \ - "nop; nop", feat) \ - : : : "memory"); \ - } while (0) - -/* - * Clear/Set flags in HCR_EL2 for a given vCPU. It only supports the current - * vCPU for now. 
- */ -#define vcpu_hcr_clear_flags(v, flags) \ - do { \ - ASSERT((v) == current); \ - (v)->arch.hcr_el2 &= ~(flags); \ - WRITE_SYSREG((v)->arch.hcr_el2, HCR_EL2); \ - } while (0) - -#define vcpu_hcr_set_flags(v, flags) \ - do { \ - ASSERT((v) == current); \ - (v)->arch.hcr_el2 |= (flags); \ - WRITE_SYSREG((v)->arch.hcr_el2, HCR_EL2); \ - } while (0) - -#endif /* __ASSEMBLY__ */ -#endif /* __ASM_ARM_PROCESSOR_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/procinfo.h b/xen/include/asm-arm/procinfo.h deleted file mode 100644 index 02be56e348..0000000000 --- a/xen/include/asm-arm/procinfo.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * include/asm-arm/procinfo.h - * - * Bamvor Jian Zhang - * Copyright (c) 2013 SUSE - * - * based on linux/arch/arm/include/asm/procinfo.h - * Copyright (C) 1996-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_PROCINFO_H -#define __ASM_ARM_PROCINFO_H - -#include - -struct processor { - /* Initialize specific processor register for the new vCPU */ - void (*vcpu_initialise)(struct vcpu *v); -}; - -struct proc_info_list { - unsigned int cpu_val; - unsigned int cpu_mask; - void (*cpu_init)(void); - struct processor *processor; -}; - -const struct proc_info_list *lookup_processor_type(void); - -void processor_setup(void); -void processor_vcpu_initialise(struct vcpu *v); - -#endif diff --git a/xen/include/asm-arm/psci.h b/xen/include/asm-arm/psci.h deleted file mode 100644 index 832f77afff..0000000000 --- a/xen/include/asm-arm/psci.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_PSCI_H__ -#define __ASM_PSCI_H__ - -#include - -/* PSCI return values (inclusive of all PSCI versions) */ -#define PSCI_SUCCESS 0 -#define PSCI_NOT_SUPPORTED -1 -#define PSCI_INVALID_PARAMETERS -2 -#define PSCI_DENIED -3 -#define PSCI_ALREADY_ON -4 -#define PSCI_ON_PENDING -5 -#define PSCI_INTERNAL_FAILURE -6 -#define PSCI_NOT_PRESENT -7 -#define PSCI_DISABLED -8 -#define PSCI_INVALID_ADDRESS -9 - -/* availability of PSCI on the host for SMP bringup */ -extern uint32_t psci_ver; - -int psci_init(void); -int call_psci_cpu_on(int cpu); -void call_psci_cpu_off(void); -void call_psci_system_off(void); -void call_psci_system_reset(void); - -/* PSCI v0.2 interface */ -#define PSCI_0_2_FN32(nr) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_STANDARD, \ - nr) -#define PSCI_0_2_FN64(nr) ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_64, \ - ARM_SMCCC_OWNER_STANDARD, \ - nr) - -#define PSCI_0_2_FN32_PSCI_VERSION PSCI_0_2_FN32(0) -#define PSCI_0_2_FN32_CPU_SUSPEND PSCI_0_2_FN32(1) -#define PSCI_0_2_FN32_CPU_OFF PSCI_0_2_FN32(2) -#define PSCI_0_2_FN32_CPU_ON PSCI_0_2_FN32(3) -#define PSCI_0_2_FN32_AFFINITY_INFO PSCI_0_2_FN32(4) -#define PSCI_0_2_FN32_MIGRATE_INFO_TYPE PSCI_0_2_FN32(6) -#define PSCI_0_2_FN32_SYSTEM_OFF PSCI_0_2_FN32(8) -#define PSCI_0_2_FN32_SYSTEM_RESET PSCI_0_2_FN32(9) -#define PSCI_1_0_FN32_PSCI_FEATURES PSCI_0_2_FN32(10) - -#define 
PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1) -#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3) -#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4) - -/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */ -#define PSCI_0_2_AFFINITY_LEVEL_ON 0 -#define PSCI_0_2_AFFINITY_LEVEL_OFF 1 -#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2 - -/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */ -#define PSCI_0_2_TOS_UP_MIGRATE_CAPABLE 0 -#define PSCI_0_2_TOS_UP_NOT_MIGRATE_CAPABLE 1 -#define PSCI_0_2_TOS_MP_OR_NOT_PRESENT 2 - -/* PSCI v0.2 power state encoding for CPU_SUSPEND function */ -#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff -#define PSCI_0_2_POWER_STATE_ID_SHIFT 0 -#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16 -#define PSCI_0_2_POWER_STATE_TYPE_MASK \ - (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT) - -/* PSCI version decoding (independent of PSCI version) */ -#define PSCI_VERSION_MAJOR_SHIFT 16 -#define PSCI_VERSION_MINOR_MASK \ - ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1) -#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK -#define PSCI_VERSION_MAJOR(ver) \ - (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) -#define PSCI_VERSION_MINOR(ver) \ - ((ver) & PSCI_VERSION_MINOR_MASK) - -#define PSCI_VERSION(major, minor) \ - (((major) << PSCI_VERSION_MAJOR_SHIFT) | (minor)) - -#endif /* __ASM_PSCI_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/random.h b/xen/include/asm-arm/random.h deleted file mode 100644 index b4acee276b..0000000000 --- a/xen/include/asm-arm/random.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_RANDOM_H__ -#define __ASM_RANDOM_H__ - -static inline unsigned int arch_get_random(void) -{ - return 0; -} - -#endif /* __ASM_RANDOM_H__ */ diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h deleted file mode 100644 index ec091a28a2..0000000000 --- a/xen/include/asm-arm/regs.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef __ARM_REGS_H__ -#define __ARM_REGS_H__ - -#define PSR_MODE_MASK 0x1f - -#ifndef __ASSEMBLY__ - -#include -#include -#include -#include -#include - -#define psr_mode(psr,m) (((psr) & PSR_MODE_MASK) == m) - -static inline bool psr_mode_is_32bit(const struct cpu_user_regs *regs) -{ -#ifdef CONFIG_ARM_32 - return true; -#else - return !!(regs->cpsr & PSR_MODE_BIT); -#endif -} - -#define usr_mode(r) psr_mode((r)->cpsr,PSR_MODE_USR) -#define fiq_mode(r) psr_mode((r)->cpsr,PSR_MODE_FIQ) -#define irq_mode(r) psr_mode((r)->cpsr,PSR_MODE_IRQ) -#define svc_mode(r) psr_mode((r)->cpsr,PSR_MODE_SVC) -#define mon_mode(r) psr_mode((r)->cpsr,PSR_MODE_MON) -#define abt_mode(r) psr_mode((r)->cpsr,PSR_MODE_ABT) -#define und_mode(r) psr_mode((r)->cpsr,PSR_MODE_UND) -#define sys_mode(r) psr_mode((r)->cpsr,PSR_MODE_SYS) - -#ifdef CONFIG_ARM_32 -#define hyp_mode(r) psr_mode((r)->cpsr,PSR_MODE_HYP) -#define psr_mode_is_user(r) usr_mode(r) -#else -#define hyp_mode(r) (psr_mode((r)->cpsr,PSR_MODE_EL2h) || \ - psr_mode((r)->cpsr,PSR_MODE_EL2t)) - -/* - * Trap may have been taken from EL0, which might be in AArch32 usr - * mode, or in AArch64 mode (PSR_MODE_EL0t). - */ -#define psr_mode_is_user(r) \ - (psr_mode((r)->cpsr,PSR_MODE_EL0t) || usr_mode(r)) -#endif - -static inline bool guest_mode(const struct cpu_user_regs *r) -{ - unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); - /* Frame pointer must point into current CPU stack. 
*/ - ASSERT(diff < STACK_SIZE); - /* If not a guest frame, it must be a hypervisor frame. */ - ASSERT((diff == 0) || hyp_mode(r)); - /* Return TRUE if it's a guest frame. */ - return (diff == 0); -} - -register_t get_user_reg(struct cpu_user_regs *regs, int reg); -void set_user_reg(struct cpu_user_regs *regs, int reg, register_t val); - -#endif - -#endif /* __ARM_REGS_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/scif-uart.h b/xen/include/asm-arm/scif-uart.h deleted file mode 100644 index bce3404898..0000000000 --- a/xen/include/asm-arm/scif-uart.h +++ /dev/null @@ -1,127 +0,0 @@ -/* - * xen/include/asm-arm/scif-uart.h - * - * Common constant definition between early printk and the UART driver - * for the SCIF(A) compatible UART. - * - * Oleksandr Tyshchenko - * Copyright (C) 2014, Globallogic. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_SCIF_UART_H -#define __ASM_ARM_SCIF_UART_H - -/* Register offsets (SCIF) */ -#define SCIF_SCSMR (0x00) /* Serial mode register */ -#define SCIF_SCBRR (0x04) /* Bit rate register */ -#define SCIF_SCSCR (0x08) /* Serial control register */ -#define SCIF_SCFTDR (0x0C) /* Transmit FIFO data register */ -#define SCIF_SCFSR (0x10) /* Serial status register */ -#define SCIF_SCFRDR (0x14) /* Receive FIFO data register */ -#define SCIF_SCFCR (0x18) /* FIFO control register */ -#define SCIF_SCFDR (0x1C) /* FIFO data count register */ -#define SCIF_SCSPTR (0x20) /* Serial port register */ -#define SCIF_SCLSR (0x24) /* Line status register */ -#define SCIF_DL (0x30) /* Frequency division register */ -#define SCIF_CKS (0x34) /* Clock Select register */ - -/* Serial Control Register (SCSCR) */ -#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */ -#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */ -#define SCSCR_TE (1 << 5) /* Transmit Enable */ -#define SCSCR_RE (1 << 4) /* Receive Enable */ -#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable */ -#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable */ -#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */ -#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */ - -/* Serial Status Register (SCFSR) */ -#define SCFSR_ER (1 << 7) /* Receive Error */ -#define SCFSR_TEND (1 << 6) /* Transmission End */ -#define SCFSR_TDFE (1 << 5) /* Transmit FIFO Data Empty */ -#define SCFSR_BRK (1 << 4) /* Break Detect */ -#define SCFSR_FER (1 << 3) /* Framing Error */ -#define SCFSR_PER (1 << 2) /* Parity Error */ -#define SCFSR_RDF (1 << 1) /* Receive FIFO Data Full */ -#define SCFSR_DR (1 << 0) /* Receive Data Ready */ - -/* Line Status Register (SCLSR) */ -#define SCLSR_TO (1 << 2) /* Timeout */ -#define SCLSR_ORER (1 << 0) /* Overrun Error */ - -/* FIFO Control Register (SCFCR) */ -#define SCFCR_RTRG1 (1 << 7) /* Receive FIFO Data Count Trigger 1 */ -#define SCFCR_RTRG0 (1 << 6) /* Receive FIFO Data Count Trigger 0 */ -#define SCFCR_TTRG1 (1 << 5) /* Transmit FIFO Data Count Trigger 1 */ -#define SCFCR_TTRG0 (1 << 4) 
/* Transmit FIFO Data Count Trigger 0 */ -#define SCFCR_MCE (1 << 3) /* Modem Control Enable */ -#define SCFCR_TFRST (1 << 2) /* Transmit FIFO Data Register Reset */ -#define SCFCR_RFRST (1 << 1) /* Receive FIFO Data Register Reset */ -#define SCFCR_LOOP (1 << 0) /* Loopback Test */ - -#define SCFCR_RTRG00 (0) -#define SCFCR_RTRG01 (SCFCR_RTRG0) -#define SCFCR_RTRG10 (SCFCR_RTRG1) -#define SCFCR_RTRG11 (SCFCR_RTRG1 | SCFCR_RTRG0) - -#define SCFCR_TTRG00 (0) -#define SCFCR_TTRG01 (SCFCR_TTRG0) -#define SCFCR_TTRG10 (SCFCR_TTRG1) -#define SCFCR_TTRG11 (SCFCR_TTRG1 | SCFCR_TTRG0) - -/* Register offsets (SCIFA) */ -#define SCIFA_SCASMR (0x00) /* Serial mode register */ -#define SCIFA_SCABRR (0x04) /* Bit rate register */ -#define SCIFA_SCASCR (0x08) /* Serial control register */ -#define SCIFA_SCATDSR (0x0C) /* Transmit data stop register */ -#define SCIFA_SCAFER (0x10) /* FIFO error count register */ -#define SCIFA_SCASSR (0x14) /* Serial status register */ -#define SCIFA_SCAFCR (0x18) /* FIFO control register */ -#define SCIFA_SCAFDR (0x1C) /* FIFO data count register */ -#define SCIFA_SCAFTDR (0x20) /* Transmit FIFO data register */ -#define SCIFA_SCAFRDR (0x24) /* Receive FIFO data register */ -#define SCIFA_SCAPCR (0x30) /* Serial port control register */ -#define SCIFA_SCAPDR (0x34) /* Serial port data register */ - -/* Serial Control Register (SCASCR) */ -#define SCASCR_ERIE (1 << 10) /* Receive Error Interrupt Enable */ -#define SCASCR_BRIE (1 << 9) /* Break Interrupt Enable */ -#define SCASCR_DRIE (1 << 8) /* Receive Data Ready Interrupt Enable */ -#define SCASCR_TIE (1 << 7) /* Transmit Interrupt Enable */ -#define SCASCR_RIE (1 << 6) /* Receive Interrupt Enable */ -#define SCASCR_TE (1 << 5) /* Transmit Enable */ -#define SCASCR_RE (1 << 4) /* Receive Enable */ -#define SCASCR_CKE0 (1 << 0) /* Clock Enable 0 */ - -/* Serial Status Register (SCASSR) */ -#define SCASSR_ORER (1 << 9) /* Overrun Error */ -#define SCASSR_TSF (1 << 8) /* Transmit Data Stop */ -#define SCASSR_ER (1 << 7) /* Receive Error */ -#define SCASSR_TEND (1 << 6) /* Transmission End */ -#define SCASSR_TDFE (1 << 5) /* Transmit FIFO Data Empty */ -#define SCASSR_BRK (1 << 4) /* Break Detect */ -#define SCASSR_FER (1 << 3) /* Framing Error */ -#define SCASSR_PER (1 << 2) /* Parity Error */ -#define SCASSR_RDF (1 << 1) /* Receive FIFO Data Full */ -#define SCASSR_DR (1 << 0) /* Receive Data Ready */ - -#endif /* __ASM_ARM_SCIF_UART_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/setup.h b/xen/include/asm-arm/setup.h deleted file mode 100644 index 95da0b7ab9..0000000000 --- a/xen/include/asm-arm/setup.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef __ARM_SETUP_H_ -#define __ARM_SETUP_H_ - -#include - -#define MIN_FDT_ALIGN 8 -#define MAX_FDT_SIZE SZ_2M - -#define NR_MEM_BANKS 128 - -#define MAX_MODULES 32 /* Current maximum useful modules */ - -typedef enum { - BOOTMOD_XEN, - BOOTMOD_FDT, - BOOTMOD_KERNEL, - BOOTMOD_RAMDISK, - BOOTMOD_XSM, - BOOTMOD_GUEST_DTB, - BOOTMOD_UNKNOWN -} bootmodule_kind; - - -struct membank { - paddr_t start; - paddr_t size; - bool xen_domain; /* whether the memory bank is bound to a Xen domain. */ -}; - -struct meminfo { - int nr_banks; - struct membank bank[NR_MEM_BANKS]; -}; - -/* - * The domU flag is set for kernels and ramdisks of "xen,domain" nodes. 
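Stepping back to struct meminfo defined above, consumers simply walk the first nr_banks entries; a hypothetical dump helper (Xen's printk()/PRIpaddr assumed) might look like:

    static void dump_mem_banks(const struct meminfo *mem)
    {
        int i;

        for ( i = 0; i < mem->nr_banks; i++ )
            printk("bank %d: [%"PRIpaddr", %"PRIpaddr")%s\n",
                   i, mem->bank[i].start,
                   mem->bank[i].start + mem->bank[i].size,
                   mem->bank[i].xen_domain ? " (xen,domain)" : "");
    }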
- * The purpose of the domU flag is to avoid getting confused in - * kernel_probe, where we try to guess which is the dom0 kernel and - * initrd to be compatible with all versions of the multiboot spec. - */ -#define BOOTMOD_MAX_CMDLINE 1024 -struct bootmodule { - bootmodule_kind kind; - bool domU; - paddr_t start; - paddr_t size; -}; - -/* DT_MAX_NAME is the node name max length according the DT spec */ -#define DT_MAX_NAME 41 -struct bootcmdline { - bootmodule_kind kind; - bool domU; - paddr_t start; - char dt_name[DT_MAX_NAME]; - char cmdline[BOOTMOD_MAX_CMDLINE]; -}; - -struct bootmodules { - int nr_mods; - struct bootmodule module[MAX_MODULES]; -}; - -struct bootcmdlines { - unsigned int nr_mods; - struct bootcmdline cmdline[MAX_MODULES]; -}; - -struct bootinfo { - struct meminfo mem; - /* The reserved regions are only used when booting using Device-Tree */ - struct meminfo reserved_mem; - struct bootmodules modules; - struct bootcmdlines cmdlines; -#ifdef CONFIG_ACPI - struct meminfo acpi; -#endif -}; - -extern struct bootinfo bootinfo; - -extern domid_t max_init_domid; - -void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len); - -size_t estimate_efi_size(int mem_nr_banks); - -void acpi_create_efi_system_table(struct domain *d, - struct membank tbl_add[]); - -void acpi_create_efi_mmap_table(struct domain *d, - const struct meminfo *mem, - struct membank tbl_add[]); - -int acpi_make_efi_nodes(void *fdt, struct membank tbl_add[]); - -void create_domUs(void); -void create_dom0(void); - -void discard_initial_modules(void); -void fw_unreserved_regions(paddr_t s, paddr_t e, - void (*cb)(paddr_t, paddr_t), int first); - -size_t boot_fdt_info(const void *fdt, paddr_t paddr); -const char *boot_fdt_cmdline(const void *fdt); - -struct bootmodule *add_boot_module(bootmodule_kind kind, - paddr_t start, paddr_t size, bool domU); -struct bootmodule *boot_module_find_by_kind(bootmodule_kind kind); -struct bootmodule * boot_module_find_by_addr_and_kind(bootmodule_kind kind, - paddr_t start); -void add_boot_cmdline(const char *name, const char *cmdline, - bootmodule_kind kind, paddr_t start, bool domU); -struct bootcmdline *boot_cmdline_find_by_kind(bootmodule_kind kind); -struct bootcmdline * boot_cmdline_find_by_name(const char *name); -const char *boot_module_kind_as_string(bootmodule_kind kind); - -extern uint32_t hyp_traps_vector[]; -void init_traps(void); - -void device_tree_get_reg(const __be32 **cell, u32 address_cells, - u32 size_cells, u64 *start, u64 *size); - -u32 device_tree_get_u32(const void *fdt, int node, - const char *prop_name, u32 dflt); - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/short-desc.h b/xen/include/asm-arm/short-desc.h deleted file mode 100644 index 9652a103c4..0000000000 --- a/xen/include/asm-arm/short-desc.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef __ARM_SHORT_DESC_H__ -#define __ARM_SHORT_DESC_H__ - -/* - * First level translation table descriptor types used by the AArch32 - * short-descriptor translation table format. - */ -#define L1DESC_INVALID (0) -#define L1DESC_PAGE_TABLE (1) -#define L1DESC_SECTION (2) -#define L1DESC_SECTION_PXN (3) - -/* Defines for section and supersection shifts. */ -#define L1DESC_SECTION_SHIFT (20) -#define L1DESC_SUPERSECTION_SHIFT (24) -#define L1DESC_SUPERSECTION_EXT_BASE1_SHIFT (32) -#define L1DESC_SUPERSECTION_EXT_BASE2_SHIFT (36) - -/* Second level translation table descriptor types. 
*/ -#define L2DESC_INVALID (0) - -/* Defines for small (4K) and large page (64K) shifts. */ -#define L2DESC_SMALL_PAGE_SHIFT (12) -#define L2DESC_LARGE_PAGE_SHIFT (16) - -/* - * Comprises bits of the level 1 short-descriptor format representing - * a section. - */ -typedef struct __packed { - bool pxn:1; /* Privileged Execute Never */ - bool sec:1; /* == 1 if section or supersection */ - bool b:1; /* Bufferable */ - bool c:1; /* Cacheable */ - bool xn:1; /* Execute Never */ - unsigned int dom:4; /* Domain field */ - bool impl:1; /* Implementation defined */ - unsigned int ap:2; /* AP[1:0] */ - unsigned int tex:3; /* TEX[2:0] */ - bool ro:1; /* AP[2] */ - bool s:1; /* Shareable */ - bool ng:1; /* Non-global */ - bool supersec:1; /* Must be 0 for sections */ - bool ns:1; /* Non-secure */ - unsigned int base:12; /* Section base address */ -} short_desc_l1_sec_t; - -/* - * Comprises bits of the level 1 short-descriptor format representing - * a supersection. - */ -typedef struct __packed { - bool pxn:1; /* Privileged Execute Never */ - bool sec:1; /* == 1 if section or supersection */ - bool b:1; /* Bufferable */ - bool c:1; /* Cacheable */ - bool xn:1; /* Execute Never */ - unsigned int extbase2:4; /* Extended base address, PA[39:36] */ - bool impl:1; /* Implementation defined */ - unsigned int ap:2; /* AP[1:0] */ - unsigned int tex:3; /* TEX[2:0] */ - bool ro:1; /* AP[2] */ - bool s:1; /* Shareable */ - bool ng:1; /* Non-global */ - bool supersec:1; /* Must be 0 for sections */ - bool ns:1; /* Non-secure */ - unsigned int extbase1:4; /* Extended base address, PA[35:32] */ - unsigned int base:8; /* Supersection base address */ -} short_desc_l1_supersec_t; - -/* - * Comprises bits of the level 2 short-descriptor format representing - * a small page. - */ -typedef struct __packed { - bool xn:1; /* Execute Never */ - bool page:1; /* ==1 if small page */ - bool b:1; /* Bufferable */ - bool c:1; /* Cacheable */ - unsigned int ap:2; /* AP[1:0] */ - unsigned int tex:3; /* TEX[2:0] */ - bool ro:1; /* AP[2] */ - bool s:1; /* Shareable */ - bool ng:1; /* Non-global */ - unsigned int base:20; /* Small page base address */ -} short_desc_l2_page_t; - -/* - * Comprises bits of the level 2 short-descriptor format representing - * a large page. - */ -typedef struct __packed { - bool lpage:1; /* ==1 if large page */ - bool page:1; /* ==0 if large page */ - bool b:1; /* Bufferable */ - bool c:1; /* Cacheable */ - unsigned int ap:2; /* AP[1:0] */ - unsigned int sbz:3; /* Should be zero */ - bool ro:1; /* AP[2] */ - bool s:1; /* Shareable */ - bool ng:1; /* Non-global */ - unsigned int tex:3; /* TEX[2:0] */ - bool xn:1; /* Execute Never */ - unsigned int base:16; /* Large page base address */ -} short_desc_l2_lpage_t; - -/* - * Comprises the bits required to walk page tables adhering to the - * short-descriptor translation table format. - */ -typedef struct __packed { - unsigned int dt:2; /* Descriptor type */ - unsigned int pad1:8; - unsigned int base:22; /* Base address of block or next table */ -} short_desc_walk_t; - -/* - * Represents page table entries adhering to the short-descriptor translation - * table format. 
- */ -typedef union { - uint32_t bits; - short_desc_walk_t walk; - short_desc_l1_sec_t sec; - short_desc_l1_supersec_t supersec; - short_desc_l2_page_t pg; - short_desc_l2_lpage_t lpg; -} short_desc_t; - -#endif /* __ARM_SHORT_DESC_H__ */ diff --git a/xen/include/asm-arm/smccc.h b/xen/include/asm-arm/smccc.h deleted file mode 100644 index 9d94beb3df..0000000000 --- a/xen/include/asm-arm/smccc.h +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (c) 2015, Linaro Limited - * Copyright (c) 2017, EPAM Systems - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __ASM_ARM_SMCCC_H__ -#define __ASM_ARM_SMCCC_H__ - -#include -#include - -#define SMCCC_VERSION_MAJOR_SHIFT 16 -#define SMCCC_VERSION_MINOR_MASK \ - ((1U << SMCCC_VERSION_MAJOR_SHIFT) - 1) -#define SMCCC_VERSION_MAJOR_MASK ~SMCCC_VERSION_MINOR_MASK -#define SMCCC_VERSION_MAJOR(ver) \ - (((ver) & SMCCC_VERSION_MAJOR_MASK) >> SMCCC_VERSION_MAJOR_SHIFT) -#define SMCCC_VERSION_MINOR(ver) \ - ((ver) & SMCCC_VERSION_MINOR_MASK) - -#define SMCCC_VERSION(major, minor) \ - (((major) << SMCCC_VERSION_MAJOR_SHIFT) | (minor)) - -#define ARM_SMCCC_VERSION_1_0 SMCCC_VERSION(1, 0) -#define ARM_SMCCC_VERSION_1_1 SMCCC_VERSION(1, 1) - -/* - * This file provides common defines for ARM SMC Calling Convention as - * specified in - * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html - */ - -#define ARM_SMCCC_STD_CALL _AC(0,U) -#define ARM_SMCCC_FAST_CALL _AC(1,U) -#define ARM_SMCCC_TYPE_SHIFT 31 - -#define ARM_SMCCC_CONV_32 _AC(0,U) -#define ARM_SMCCC_CONV_64 _AC(1,U) -#define ARM_SMCCC_CONV_SHIFT 30 - -#define ARM_SMCCC_OWNER_MASK _AC(0x3F,U) -#define ARM_SMCCC_OWNER_SHIFT 24 - -#define ARM_SMCCC_FUNC_MASK _AC(0xFFFF,U) - -#ifndef __ASSEMBLY__ - -extern uint32_t smccc_ver; - -/* Check if this is a fast call. */ -static inline bool smccc_is_fast_call(register_t funcid) -{ - return funcid & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT); -} - -/* Check if this is a 64-bit call. */ -static inline bool smccc_is_conv_64(register_t funcid) -{ - return funcid & (ARM_SMCCC_CONV_64 << ARM_SMCCC_CONV_SHIFT); -} - -/* Get function number from function identifier. */ -static inline uint32_t smccc_get_fn(register_t funcid) -{ - return funcid & ARM_SMCCC_FUNC_MASK; -} - -/* Get service owner number from function identifier. */ -static inline uint32_t smccc_get_owner(register_t funcid) -{ - return (funcid >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK; -} - -/* - * struct arm_smccc_res - Result from SMC call - * @a0 - @a3 result values from registers 0 to 3 - */ -struct arm_smccc_res { - unsigned long a0; - unsigned long a1; - unsigned long a2; - unsigned long a3; -}; - -/* SMCCC v1.1 implementation madness follows */ -#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x - -#define __count_args(...) 
\ - ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) - -#define __constraint_write_0 \ - "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_1 \ - "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_2 \ - "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) -#define __constraint_write_3 \ - "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) -#define __constraint_write_4 __constraint_write_3 -#define __constraint_write_5 __constraint_write_4 -#define __constraint_write_6 __constraint_write_5 -#define __constraint_write_7 __constraint_write_6 - -#define __constraint_read_0 -#define __constraint_read_1 -#define __constraint_read_2 -#define __constraint_read_3 -#define __constraint_read_4 "r" (r4) -#define __constraint_read_5 __constraint_read_4, "r" (r5) -#define __constraint_read_6 __constraint_read_5, "r" (r6) -#define __constraint_read_7 __constraint_read_6, "r" (r7) - -#define __declare_arg_0(a0, res) \ - struct arm_smccc_res *___res = res; \ - register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ - register unsigned long r1 ASM_REG(1); \ - register unsigned long r2 ASM_REG(2); \ - register unsigned long r3 ASM_REG(3) - -#define __declare_arg_1(a0, a1, res) \ - typeof(a1) __a1 = a1; \ - struct arm_smccc_res *___res = res; \ - register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ - register unsigned long r1 ASM_REG(1) = __a1; \ - register unsigned long r2 ASM_REG(2); \ - register unsigned long r3 ASM_REG(3) - -#define __declare_arg_2(a0, a1, a2, res) \ - typeof(a1) __a1 = a1; \ - typeof(a2) __a2 = a2; \ - struct arm_smccc_res *___res = res; \ - register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ - register unsigned long r1 ASM_REG(1) = __a1; \ - register unsigned long r2 ASM_REG(2) = __a2; \ - register unsigned long r3 ASM_REG(3) - -#define __declare_arg_3(a0, a1, a2, a3, res) \ - typeof(a1) __a1 = a1; \ - typeof(a2) __a2 = a2; \ - typeof(a3) __a3 = a3; \ - struct arm_smccc_res *___res = res; \ - register unsigned long r0 ASM_REG(0) = (uint32_t)a0; \ - register unsigned long r1 ASM_REG(1) = __a1; \ - register unsigned long r2 ASM_REG(2) = __a2; \ - register unsigned long r3 ASM_REG(3) = __a3 - -#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ - typeof(a4) __a4 = a4; \ - __declare_arg_3(a0, a1, a2, a3, res); \ - register unsigned long r4 ASM_REG(4) = __a4 - -#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ - typeof(a5) __a5 = a5; \ - __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register typeof(a5) r5 ASM_REG(5) = __a5 - -#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ - typeof(a6) __a6 = a6; \ - __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register typeof(a6) r6 ASM_REG(6) = __a6 - -#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ - typeof(a7) __a7 = a7; \ - __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register typeof(a7) r7 ASM_REG(7) = __a7 - -#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) -#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) - -#define ___constraints(count) \ - : __constraint_write_ ## count \ - : __constraint_read_ ## count \ - : "memory" -#define __constraints(count) ___constraints(count) - -/* - * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call - * - * This is a variadic macro taking one to eight source arguments, and - * an optional return structure. - * - * @a0-a7: arguments passed in registers 0 to 7 - * @res: result values from registers 0 to 3 - * - * This macro is used to make SMC calls following SMC Calling Convention v1.1. 
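The __count_args() machinery above resolves to a literal at preprocessing time: the eight trailing literals shift right by one for every caller-supplied argument, so the tenth token always names the number of value arguments following the function ID. A worked expansion (illustrative only):

    /* arm_smccc_smc(fid, arg1, &res) counts one value argument: */
    __count_args(fid, arg1, &res)
        /* -> ___count_args(fid, arg1, &res, 7, 6, 5, 4, 3, 2, 1, 0) */
        /* -> 1, selecting __declare_arg_1(fid, arg1, &res)          */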
- * The contents of the supplied parameters are copied to registers 0 to 7 prior - * to the SMC instruction. The return values are updated with the content - * from register 0 to 3 on return from the SMC instruction if not NULL. - * - * We have an output list that is not necessarily used, and GCC feels - * entitled to optimise the whole sequence away. "volatile" is what - * makes it stick. - */ -#define arm_smccc_1_1_smc(...) \ - do { \ - __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile("smc #0\n" \ - __constraints(__count_args(__VA_ARGS__))); \ - if ( ___res ) \ - *___res = (typeof(*___res)){r0, r1, r2, r3}; \ - } while ( 0 ) - -/* - * The calling convention for arm32 is the same for both SMCCC v1.0 and - * v1.1. - */ -#ifdef CONFIG_ARM_32 -#define arm_smccc_1_0_smc(...) arm_smccc_1_1_smc(__VA_ARGS__) -#define arm_smccc_smc(...) arm_smccc_1_1_smc(__VA_ARGS__) -#else - -void __arm_smccc_1_0_smc(register_t a0, register_t a1, register_t a2, - register_t a3, register_t a4, register_t a5, - register_t a6, register_t a7, - struct arm_smccc_res *res); - -/* Macros to handle variadic parameter for SMCCC v1.0 helper */ -#define __arm_smccc_1_0_smc_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ - __arm_smccc_1_0_smc(a0, a1, a2, a3, a4, a5, a6, a7, res) - -#define __arm_smccc_1_0_smc_6(a0, a1, a2, a3, a4, a5, a6, res) \ - __arm_smccc_1_0_smc_7(a0, a1, a2, a3, a4, a5, a6, 0, res) - -#define __arm_smccc_1_0_smc_5(a0, a1, a2, a3, a4, a5, res) \ - __arm_smccc_1_0_smc_6(a0, a1, a2, a3, a4, a5, 0, res) - -#define __arm_smccc_1_0_smc_4(a0, a1, a2, a3, a4, res) \ - __arm_smccc_1_0_smc_5(a0, a1, a2, a3, a4, 0, res) - -#define __arm_smccc_1_0_smc_3(a0, a1, a2, a3, res) \ - __arm_smccc_1_0_smc_4(a0, a1, a2, a3, 0, res) - -#define __arm_smccc_1_0_smc_2(a0, a1, a2, res) \ - __arm_smccc_1_0_smc_3(a0, a1, a2, 0, res) - -#define __arm_smccc_1_0_smc_1(a0, a1, res) \ - __arm_smccc_1_0_smc_2(a0, a1, 0, res) - -#define __arm_smccc_1_0_smc_0(a0, res) \ - __arm_smccc_1_0_smc_1(a0, 0, res) - -#define ___arm_smccc_1_0_smc_count(count, ...) \ - __arm_smccc_1_0_smc_ ## count(__VA_ARGS__) - -#define __arm_smccc_1_0_smc_count(count, ...) \ - ___arm_smccc_1_0_smc_count(count, __VA_ARGS__) - -#define arm_smccc_1_0_smc(...) \ - __arm_smccc_1_0_smc_count(__count_args(__VA_ARGS__), __VA_ARGS__) - -#define arm_smccc_smc(...) \ - do { \ - if ( cpus_have_const_cap(ARM_SMCCC_1_1) ) \ - arm_smccc_1_1_smc(__VA_ARGS__); \ - else \ - arm_smccc_1_0_smc(__VA_ARGS__); \ - } while ( 0 ) -#endif /* CONFIG_ARM_64 */ - -#endif /* __ASSEMBLY__ */
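A usage sketch for the dispatch macro above, assuming the PSCI_0_2_FN32_PSCI_VERSION identifier from psci.h earlier in this patch (hypothetical wrapper, not part of the series):

    static uint32_t query_psci_version(void)
    {
        struct arm_smccc_res res;

        /* One input (the function ID); results land in res.a0..a3. */
        arm_smccc_smc(PSCI_0_2_FN32_PSCI_VERSION, &res);

        return res.a0;
    }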
- -/* - * Construct function identifier from call type (fast or standard), - * calling convention (32 or 64 bit), service owner and function number. - */ -#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ - (((type) << ARM_SMCCC_TYPE_SHIFT) | \ - ((calling_convention) << ARM_SMCCC_CONV_SHIFT) | \ - (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ - (func_num)) - -/* List of known service owners */ -#define ARM_SMCCC_OWNER_ARCH 0 -#define ARM_SMCCC_OWNER_CPU 1 -#define ARM_SMCCC_OWNER_SIP 2 -#define ARM_SMCCC_OWNER_OEM 3 -#define ARM_SMCCC_OWNER_STANDARD 4 -#define ARM_SMCCC_OWNER_HYPERVISOR 5 -#define ARM_SMCCC_OWNER_TRUSTED_APP 48 -#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 -#define ARM_SMCCC_OWNER_TRUSTED_OS 50 -#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 - -/* List of generic function numbers */ -#define ARM_SMCCC_CALL_COUNT_FID(owner) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_##owner, \ - 0xFF00) - -#define ARM_SMCCC_CALL_UID_FID(owner) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_##owner, \ - 0xFF01) - -#define ARM_SMCCC_REVISION_FID(owner) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_##owner, \ - 0xFF03) - -#define ARM_SMCCC_VERSION_FID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_ARCH, \ - 0x0) \ - -#define ARM_SMCCC_ARCH_FEATURES_FID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_ARCH, \ - 0x1) - -#define ARM_SMCCC_ARCH_WORKAROUND_1_FID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_ARCH, \ - 0x8000) - -#define ARM_SMCCC_ARCH_WORKAROUND_2_FID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_ARCH, \ - 0x7FFF) - -/* SMCCC error codes */ -#define ARM_SMCCC_NOT_REQUIRED (-2) -#define ARM_SMCCC_ERR_UNKNOWN_FUNCTION (-1) -#define ARM_SMCCC_NOT_SUPPORTED (-1) -#define ARM_SMCCC_SUCCESS (0) - -/* SMCCC function identifier range which is reserved for existing APIs */ -#define ARM_SMCCC_RESERVED_RANGE_START 0x0 -#define ARM_SMCCC_RESERVED_RANGE_END 0x0100FFFF - -#endif /* __ASM_ARM_SMCCC_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/smp.h b/xen/include/asm-arm/smp.h deleted file mode 100644 index af5a2fe652..0000000000 --- a/xen/include/asm-arm/smp.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -#ifndef __ASSEMBLY__ -#include -#include -#include -#endif - -DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask); -DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask); - -#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) - -#define smp_processor_id() get_processor_id() - -/* - * Do we, for platform reasons, need to actually keep CPUs online when we - * would otherwise prefer them to be off? 
- */ -#define park_offline_cpus false - -extern void noreturn stop_cpu(void); - -extern int arch_smp_init(void); -extern int arch_cpu_init(int cpu, struct dt_device_node *dn); -extern int arch_cpu_up(int cpu); - -int cpu_up_send_sgi(int cpu); - -/* Secondary CPU entry point */ -extern void init_secondary(void); - -extern void smp_init_cpus(void); -extern void smp_clear_cpu_maps (void); -extern int smp_get_max_cpus (void); -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/softirq.h b/xen/include/asm-arm/softirq.h deleted file mode 100644 index 976e0ebd70..0000000000 --- a/xen/include/asm-arm/softirq.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_SOFTIRQ_H__ -#define __ASM_SOFTIRQ_H__ - -#define NR_ARCH_SOFTIRQS 0 - -#define arch_skip_send_event_check(cpu) 0 - -#endif /* __ASM_SOFTIRQ_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h deleted file mode 100644 index 42b0f584fe..0000000000 --- a/xen/include/asm-arm/spinlock.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#define arch_lock_acquire_barrier() smp_mb() -#define arch_lock_release_barrier() smp_mb() - -#define arch_lock_relax() wfe() -#define arch_lock_signal() do { \ - dsb(ishst); \ - sev(); \ -} while(0) - -#define arch_lock_signal_wmb() arch_lock_signal() - -#endif /* __ASM_SPINLOCK_H */ diff --git a/xen/include/asm-arm/string.h b/xen/include/asm-arm/string.h deleted file mode 100644 index b485e49044..0000000000 --- a/xen/include/asm-arm/string.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef __ARM_STRING_H__ -#define __ARM_STRING_H__ - - -/* - * We don't do inline string functions, since the - * optimised inline asm versions are not small. 
- */ - -#define __HAVE_ARCH_STRRCHR -#define __HAVE_ARCH_STRCHR -#if defined(CONFIG_ARM_64) -#define __HAVE_ARCH_STRCMP -#define __HAVE_ARCH_STRNCMP -#define __HAVE_ARCH_STRLEN -#define __HAVE_ARCH_STRNLEN -#endif - -#define __HAVE_ARCH_MEMCPY -#if defined(CONFIG_ARM_64) -#define __HAVE_ARCH_MEMCMP -#endif -#define __HAVE_ARCH_MEMMOVE -#define __HAVE_ARCH_MEMSET -#define __HAVE_ARCH_MEMCHR - -#if defined(CONFIG_ARM_32) - -void __memzero(void *ptr, size_t n); - -#define memset(p, v, n) \ - ({ \ - void *__p = (p); size_t __n = n; \ - if ((__n) != 0) { \ - if (__builtin_constant_p((v)) && (v) == 0) \ - __memzero((__p),(__n)); \ - else \ - memset((__p),(v),(__n)); \ - } \ - (__p); \ - }) - -#endif - -#endif /* __ARM_STRING_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/sysregs.h b/xen/include/asm-arm/sysregs.h deleted file mode 100644 index 5c5c51bbcd..0000000000 --- a/xen/include/asm-arm/sysregs.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_ARM_SYSREGS_H -#define __ASM_ARM_SYSREGS_H - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -#endif /* __ASM_ARM_SYSREGS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ - - diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h deleted file mode 100644 index 65d5c8e423..0000000000 --- a/xen/include/asm-arm/system.h +++ /dev/null @@ -1,73 +0,0 @@ -/* Portions taken from Linux arch arm */ -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H - -#include -#include - -#define sev() asm volatile("sev" : : : "memory") -#define wfe() asm volatile("wfe" : : : "memory") -#define wfi() asm volatile("wfi" : : : "memory") - -#define isb() asm volatile("isb" : : : "memory") -#define dsb(scope) asm volatile("dsb " #scope : : : "memory") -#define dmb(scope) asm volatile("dmb " #scope : : : "memory") - -#define mb() dsb(sy) -#ifdef CONFIG_ARM_64 -#define rmb() dsb(ld) -#else -#define rmb() dsb(sy) /* 32-bit has no ld variant. */ -#endif -#define wmb() dsb(st) - -#define smp_mb() dmb(ish) -#ifdef CONFIG_ARM_64 -#define smp_rmb() dmb(ishld) -#else -#define smp_rmb() dmb(ish) /* 32-bit has no ishld variant. */ -#endif - -#define smp_wmb() dmb(ishst) - -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() - -/* - * This is used to ensure the compiler did actually allocate the register we - * asked it for some inline assembly sequences. Apparently we can't trust - * the compiler from one version to another so a bit of paranoia won't hurt. - * This string is meant to be concatenated with the inline asm string and - * will cause compilation to stop on mismatch. 
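The __asmeq() guard defined just below is concatenated in front of an asm template; a hedged arm64 sketch of the intended use (hypothetical function):

    static inline uint64_t hyp_call(uint64_t arg)
    {
        register uint64_t x0 asm("x0") = arg;

        /* Fails the build if "%0"/"%1" were not allocated to x0. */
        asm volatile (__asmeq("%0", "x0") __asmeq("%1", "x0")
                      "hvc #0\n"
                      : "=r" (x0) : "r" (x0) : "memory");

        return x0;
    }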
- * (for details, see gcc PR 15089) - */ -#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "unknown ARM variant" -#endif - -static inline int local_abort_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_ABT_MASK); -} - -#define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v) - -extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next); - -#endif -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/tee/optee_msg.h b/xen/include/asm-arm/tee/optee_msg.h deleted file mode 100644 index fe743dbde3..0000000000 --- a/xen/include/asm-arm/tee/optee_msg.h +++ /dev/null @@ -1,310 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (c) 2015-2017, Linaro Limited - */ -#ifndef _OPTEE_MSG_H -#define _OPTEE_MSG_H - -#include -#include - -/* - * This file defines the OP-TEE message protocol used to communicate - * with an instance of OP-TEE running in secure world. - */ - -/***************************************************************************** - * Part 1 - formatting of messages - *****************************************************************************/ - -#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 -#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 -#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 -#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 -#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 -#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 -#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 -#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 -#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa -#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb - -#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) - -/* - * Meta parameter to be absorbed by the Secure OS and not passed - * to the Trusted Application. - * - * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. - */ -#define OPTEE_MSG_ATTR_META BIT(8, UL) - -/* - * Pointer to a list of pages used to register user-defined SHM buffer. - * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*. - * buf_ptr should point to the beginning of the buffer. Buffer will contain - * list of page addresses. OP-TEE core can reconstruct contiguous buffer from - * that page addresses list. Page addresses are stored as 64 bit values. - * Last entry on a page should point to the next page of buffer. - * Every entry in buffer should point to a 4k page beginning (12 least - * significant bits must be equal to zero). - * - * 12 least significant of optee_msg_param.u.tmem.buf_ptr should hold page - * offset of user buffer. - * - * So, entries should be placed like members of this structure: - * - * struct page_data { - * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1]; - * uint64_t next_page_data; - * }; - * - * Structure is designed to exactly fit into the page size - * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page. - * - * The size of 4KB is chosen because this is the smallest page size for ARM - * architectures. If REE uses larger pages, it should divide them to 4KB ones. - */ -#define OPTEE_MSG_ATTR_NONCONTIG BIT(9, UL) - -/* - * Memory attributes for caching passed with temp memrefs. The actual value - * used is defined outside the message protocol with the exception of - * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already - * defined for the memory range should be used. 
If optee_smc.h is used as - * bearer of this protocol OPTEE_SMC_SHM_* is used for values. - */ -#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 -#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) -#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 - -/* - * Same values as TEE_LOGIN_* from TEE Internal API - */ -#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 -#define OPTEE_MSG_LOGIN_USER 0x00000001 -#define OPTEE_MSG_LOGIN_GROUP 0x00000002 -#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 -#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 -#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 - -/* - * Page size used in non-contiguous buffer entries - */ -#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096 - -#ifndef ASM -/** - * struct optee_msg_param_tmem - temporary memory reference parameter - * @buf_ptr: Address of the buffer - * @size: Size of the buffer - * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm - * - * Secure and normal world communicates pointers as physical address - * instead of the virtual address. This is because secure and normal world - * have completely independent memory mapping. Normal world can even have a - * hypervisor which need to translate the guest physical address (AKA IPA - * in ARM documentation) to a real physical address before passing the - * structure to secure world. - */ -struct optee_msg_param_tmem { - uint64_t buf_ptr; - uint64_t size; - uint64_t shm_ref; -}; - -/** - * struct optee_msg_param_rmem - registered memory reference parameter - * @offs: Offset into shared memory reference - * @size: Size of the buffer - * @shm_ref: Shared memory reference, pointer to a struct tee_shm - */ -struct optee_msg_param_rmem { - uint64_t offs; - uint64_t size; - uint64_t shm_ref; -}; - -/** - * struct optee_msg_param_value - values - * @a: first value - * @b: second value - * @c: third value - */ -struct optee_msg_param_value { - uint64_t a; - uint64_t b; - uint64_t c; -}; - -/** - * struct optee_msg_param - parameter - * @attr: attributes - * @memref: a memory reference - * @value: a value - * - * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in - * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, - * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and - * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. - * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. - */ -struct optee_msg_param { - uint64_t attr; - union { - struct optee_msg_param_tmem tmem; - struct optee_msg_param_rmem rmem; - struct optee_msg_param_value value; - } u; -}; - -/** - * struct optee_msg_arg - call argument - * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* - * @func: Trusted Application function, specific to the Trusted Application, - * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND - * @session: In parameter for all OPTEE_MSG_CMD_* except - * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead - * @cancel_id: Cancellation id, a unique value to identify this request - * @ret: return value - * @ret_origin: origin of the return value - * @num_params: number of parameters supplied to the OS Command - * @params: the parameters supplied to the OS Command - * - * All normal calls to Trusted OS uses this struct. If cmd requires further - * information than what these fields hold it can be passed as a parameter - * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding - * attrs field). All parameters tagged as meta have to come first. 
- */ -struct optee_msg_arg { - uint32_t cmd; - uint32_t func; - uint32_t session; - uint32_t cancel_id; - uint32_t pad; - uint32_t ret; - uint32_t ret_origin; - uint32_t num_params; - - /* num_params tells the actual number of element in params */ - struct optee_msg_param params[]; -}; - -/** - * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg - * - * @num_params: Number of parameters embedded in the struct optee_msg_arg - * - * Returns the size of the struct optee_msg_arg together with the number - * of embedded parameters. - */ -#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ - (sizeof(struct optee_msg_arg) + \ - sizeof(struct optee_msg_param) * (num_params)) - -/* - * Defines the maximum value of @num_params that can be passed to - * OPTEE_MSG_GET_ARG_SIZE without a risk of crossing page boundary. - */ -#define OPTEE_MSG_MAX_NUM_PARAMS \ - ((OPTEE_MSG_NONCONTIG_PAGE_SIZE - sizeof(struct optee_msg_arg)) / \ - sizeof(struct optee_msg_param)) - -#endif /*ASM*/ - -/***************************************************************************** - * Part 2 - requests from normal world - *****************************************************************************/ - -/* - * Return the following UID if using API specified in this file without - * further extensions: - * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. - * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, - * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. - */ -#define OPTEE_MSG_UID_0 0x384fb3e0 -#define OPTEE_MSG_UID_1 0xe7f811e3 -#define OPTEE_MSG_UID_2 0xaf630002 -#define OPTEE_MSG_UID_3 0xa5d5c51b -#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 - -/* - * Returns 2.0 if using API specified in this file without further - * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR - * and OPTEE_MSG_REVISION_MINOR - */ -#define OPTEE_MSG_REVISION_MAJOR 2 -#define OPTEE_MSG_REVISION_MINOR 0 -#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 - -/* - * Get UUID of Trusted OS. - * - * Used by non-secure world to figure out which Trusted OS is installed. - * Note that returned UUID is the UUID of the Trusted OS, not of the API. - * - * Returns UUID in 4 32-bit words in the same way as - * OPTEE_MSG_FUNCID_CALLS_UID described above. - */ -#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 -#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 -#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 -#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b -#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 - -/* - * Get revision of Trusted OS. - * - * Used by non-secure world to figure out which version of the Trusted OS - * is installed. Note that the returned revision is the revision of the - * Trusted OS, not of the API. - * - * Returns revision in 2 32-bit words in the same way as - * OPTEE_MSG_CALLS_REVISION described above. - */ -#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
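To see OPTEE_MSG_GET_ARG_SIZE() and OPTEE_MSG_MAX_NUM_PARAMS in action, a hypothetical initialiser for an argument buffer carved out of one shared page (the shm pointer and zeroing policy are assumptions):

    static struct optee_msg_arg *init_msg_arg(void *shm, uint32_t cmd,
                                              uint32_t num_params)
    {
        struct optee_msg_arg *arg = shm;

        if ( num_params > OPTEE_MSG_MAX_NUM_PARAMS )
            return NULL;

        memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
        arg->cmd = cmd;
        arg->num_params = num_params;

        return arg;
    }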
- -/* - * Do a secure call with struct optee_msg_arg as argument - * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd - * - * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. - * The first two parameters are tagged as meta, holding two value - * parameters to pass the following information: - * param[0].u.value.a-b uuid of Trusted Application - * param[1].u.value.a-b uuid of Client - * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* - * - * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened - * session to a Trusted Application. struct optee_msg_arg::func is Trusted - * Application function, specific to the Trusted Application. - * - * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to - * Trusted Application. - * - * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. - * - * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The - * information is passed as: - * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT - * [| OPTEE_MSG_ATTR_NONCONTIG] - * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) - * [in] param[0].u.tmem.size size (of first fragment) - * [in] param[0].u.tmem.shm_ref holds shared memory reference - * - * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared - * memory reference. The information is passed as: - * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT - * [in] param[0].u.rmem.shm_ref holds shared memory reference - * [in] param[0].u.rmem.offs 0 - * [in] param[0].u.rmem.size 0 - */ -#define OPTEE_MSG_CMD_OPEN_SESSION 0 -#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 -#define OPTEE_MSG_CMD_CLOSE_SESSION 2 -#define OPTEE_MSG_CMD_CANCEL 3 -#define OPTEE_MSG_CMD_REGISTER_SHM 4 -#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 -#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 - -#endif /* _OPTEE_MSG_H */ diff --git a/xen/include/asm-arm/tee/optee_rpc_cmd.h b/xen/include/asm-arm/tee/optee_rpc_cmd.h deleted file mode 100644 index d6b9dfe30c..0000000000 --- a/xen/include/asm-arm/tee/optee_rpc_cmd.h +++ /dev/null @@ -1,318 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (c) 2016-2017, Linaro Limited - */ - -#ifndef __OPTEE_RPC_CMD_H -#define __OPTEE_RPC_CMD_H - -/* - * All RPC is done with a struct optee_msg_arg as bearer of information, - * struct optee_msg_arg::arg holds values defined by OPTEE_RPC_CMD_* below. - * Only the commands handled by the kernel driver are defined here. - * - * RPC communication with tee-supplicant is reversed compared to normal - * client communication described above. The supplicant receives requests - * and sends responses. - */ - -/* - * Load a TA into memory - * - * Since the size of the TA isn't known in advance the size of the TA - * can be queried with a NULL buffer. - * - * [in] value[0].a-b UUID - * [out] memref[1] Buffer with TA - */ -#define OPTEE_RPC_CMD_LOAD_TA 0 - -/* - * Replay Protected Memory Block access - * - * [in] memref[0] Frames to device - * [out] memref[1] Frames from device - */ -#define OPTEE_RPC_CMD_RPMB 1 - -/* - * File system access, see definition of protocol below - */ -#define OPTEE_RPC_CMD_FS 2 - -/* - * Get time - * - * Returns number of seconds and nano seconds since the Epoch, - * 1970-01-01 00:00:00 +0000 (UTC). - * - * [out] value[0].a Number of seconds - * [out] value[0].b Number of nano seconds. - */ -#define OPTEE_RPC_CMD_GET_TIME 3
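Tying the OPTEE_RPC_CMD_GET_TIME layout above back to the Part 1 structures, a hypothetical decode of the reply (single value parameter assumed):

    static void read_rpc_time(const struct optee_msg_arg *arg,
                              uint64_t *secs, uint64_t *nsecs)
    {
        const struct optee_msg_param_value *v = &arg->params[0].u.value;

        *secs = v->a;   /* [out] value[0].a  Number of seconds      */
        *nsecs = v->b;  /* [out] value[0].b  Number of nano seconds */
    }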
- * - * Waiting on a key - * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP - * [in] value[0].b Wait key - * - * Waking up a key - * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP - * [in] value[0].b Wakeup key - */ -#define OPTEE_RPC_CMD_WAIT_QUEUE 4 -#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0 -#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1 - -/* - * Suspend execution - * - * [in] value[0].a Number of milliseconds to suspend - */ -#define OPTEE_RPC_CMD_SUSPEND 5 - -/* - * Allocate a piece of shared memory - * - * [in] value[0].a Type of memory one of - * OPTEE_RPC_SHM_TYPE_* below - * [in] value[0].b Requested size - * [in] value[0].c Required alignment - * [out] memref[0] Buffer - */ -#define OPTEE_RPC_CMD_SHM_ALLOC 6 -/* Memory that can be shared with a non-secure user space application */ -#define OPTEE_RPC_SHM_TYPE_APPL 0 -/* Memory only shared with non-secure kernel */ -#define OPTEE_RPC_SHM_TYPE_KERNEL 1 -/* - * Memory shared with non-secure kernel and exported to a non-secure user - * space application - */ -#define OPTEE_RPC_SHM_TYPE_GLOBAL 2 - -/* - * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC - * - * [in] value[0].a Type of memory one of - * OPTEE_RPC_SHM_TYPE_* above - * [in] value[0].b Value of shared memory reference or cookie - */ -#define OPTEE_RPC_CMD_SHM_FREE 7 - -/* Was OPTEE_RPC_CMD_SQL_FS, which isn't supported any longer */ -#define OPTEE_RPC_CMD_SQL_FS_RESERVED 8 - -/* - * Send TA profiling information to normal world - * - * [in/out] value[0].a File identifier. Must be set to 0 on - * first call. A value >= 1 will be - * returned on success. Re-use this value - * to append data to the same file. - * [in] memref[1] TA UUID - * [in] memref[2] Profile data - */ -#define OPTEE_RPC_CMD_GPROF 9 - -/* - * Socket command, see definition of protocol below - */ -#define OPTEE_RPC_CMD_SOCKET 10 - -/* - * Register timestamp buffer in the linux kernel optee driver - * - * [in] value[0].a Subcommand (register buffer, unregister buffer) - * [in] value[0].b Physical address of timestamp buffer - * [in] value[0].c Size of buffer - */ -#define OPTEE_RPC_CMD_BENCH_REG 20 - -/* - * Definition of protocol for command OPTEE_RPC_CMD_FS - */ - -/* - * Open a file - * - * [in] value[0].a OPTEE_RPC_FS_OPEN - * [in] memref[1] A string holding the file name - * [out] value[2].a File descriptor of open file - */ -#define OPTEE_RPC_FS_OPEN 0 - -/* - * Create a file - * - * [in] value[0].a OPTEE_RPC_FS_CREATE - * [in] memref[1] A string holding the file name - * [out] value[2].a File descriptor of open file - */ -#define OPTEE_RPC_FS_CREATE 1 - -/* - * Close a file - * - * [in] value[0].a OPTEE_RPC_FS_CLOSE - * [in] value[0].b File descriptor of open file. - */ -#define OPTEE_RPC_FS_CLOSE 2 - -/* - * Read from a file - * - * [in] value[0].a OPTEE_RPC_FS_READ - * [in] value[0].b File descriptor of open file - * [in] value[0].c Offset into file - * [out] memref[1] Buffer to hold returned data - */ -#define OPTEE_RPC_FS_READ 3 - -/* - * Write to a file - * - * [in] value[0].a OPTEE_RPC_FS_WRITE - * [in] value[0].b File descriptor of open file - * [in] value[0].c Offset into file - * [in] memref[1] Buffer holding data to be written - */ -#define OPTEE_RPC_FS_WRITE 4 - -/* - * Truncate a file - * - * [in] value[0].a OPTEE_RPC_FS_TRUNCATE - * [in] value[0].b File descriptor of open file - * [in] value[0].c Length of file. 
- */
-#define OPTEE_RPC_FS_TRUNCATE 5
-
-/*
- * Remove a file
- *
- * [in] value[0].a OPTEE_RPC_FS_REMOVE
- * [in] memref[1] A string holding the file name
- */
-#define OPTEE_RPC_FS_REMOVE 6
-
-/*
- * Rename a file
- *
- * [in] value[0].a OPTEE_RPC_FS_RENAME
- * [in] value[0].b True if existing target should be removed
- * [in] memref[1] A string holding the old file name
- * [in] memref[2] A string holding the new file name
- */
-#define OPTEE_RPC_FS_RENAME 7
-
-/*
- * Opens a directory for file listing
- *
- * [in] value[0].a OPTEE_RPC_FS_OPENDIR
- * [in] memref[1] A string holding the name of the directory
- * [out] value[2].a Handle to open directory
- */
-#define OPTEE_RPC_FS_OPENDIR 8
-
-/*
- * Closes a directory handle
- *
- * [in] value[0].a OPTEE_RPC_FS_CLOSEDIR
- * [in] value[0].b Handle to open directory
- */
-#define OPTEE_RPC_FS_CLOSEDIR 9
-
-/*
- * Read next file name of directory
- *
- * [in] value[0].a OPTEE_RPC_FS_READDIR
- * [in] value[0].b Handle to open directory
- * [out] memref[1] A string holding the file name
- */
-#define OPTEE_RPC_FS_READDIR 10
-
-/* End of definition of protocol for command OPTEE_RPC_CMD_FS */
-
-/*
- * Definition of protocol for command OPTEE_RPC_CMD_SOCKET
- */
-
-#define OPTEE_RPC_SOCKET_TIMEOUT_NONBLOCKING 0
-#define OPTEE_RPC_SOCKET_TIMEOUT_BLOCKING 0xffffffff
-
-/*
- * Open socket
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_OPEN
- * [in] value[0].b TA instance id
- * [in] value[1].a Server port number
- * [in] value[1].b Protocol, TEE_ISOCKET_PROTOCOLID_*
- * [in] value[1].c IP version TEE_IP_VERSION_* from tee_ipsocket.h
- * [in] memref[2] Server address
- * [out] value[3].a Socket handle (32-bit)
- */
-#define OPTEE_RPC_SOCKET_OPEN 0
-
-/*
- * Close socket
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_CLOSE
- * [in] value[0].b TA instance id
- * [in] value[0].c Socket handle
- */
-#define OPTEE_RPC_SOCKET_CLOSE 1
-
-/*
- * Close all sockets
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_CLOSE_ALL
- * [in] value[0].b TA instance id
- */
-#define OPTEE_RPC_SOCKET_CLOSE_ALL 2
-
-/*
- * Send data on socket
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_SEND
- * [in] value[0].b TA instance id
- * [in] value[0].c Socket handle
- * [in] memref[1] Buffer to transmit
- * [in] value[2].a Timeout ms or OPTEE_RPC_SOCKET_TIMEOUT_*
- * [out] value[2].b Number of transmitted bytes
- */
-#define OPTEE_RPC_SOCKET_SEND 3
-
-/*
- * Receive data on socket
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_RECV
- * [in] value[0].b TA instance id
- * [in] value[0].c Socket handle
- * [out] memref[1] Buffer to receive
- * [in] value[2].a Timeout ms or OPTEE_RPC_SOCKET_TIMEOUT_*
- */
-#define OPTEE_RPC_SOCKET_RECV 4
-
-/*
- * Perform IOCTL on socket
- *
- * [in] value[0].a OPTEE_RPC_SOCKET_IOCTL
- * [in] value[0].b TA instance id
- * [in] value[0].c Socket handle
- * [in/out] memref[1] Buffer
- * [in] value[2].a Ioctl command
- */
-#define OPTEE_RPC_SOCKET_IOCTL 5
-
-/* End of definition of protocol for command OPTEE_RPC_CMD_SOCKET */
-
-#endif /*__OPTEE_RPC_CMD_H*/
diff --git a/xen/include/asm-arm/tee/optee_smc.h b/xen/include/asm-arm/tee/optee_smc.h
deleted file mode 100644
index 2f5c702326..0000000000
--- a/xen/include/asm-arm/tee/optee_smc.h
+++ /dev/null
@@ -1,567 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (c) 2015, Linaro Limited
- */
-#ifndef OPTEE_SMC_H
-#define OPTEE_SMC_H
-
-/*
- * This file is exported by OP-TEE and is kept in sync between secure
- * world and normal world kernel driver.
We're following ARM SMC Calling - * Convention as specified in - * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html - * - * This file depends on optee_msg.h being included to expand the SMC id - * macros below. - */ - - -#define OPTEE_SMC_STD_CALL_VAL(func_num) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) -#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) - -/* - * Function specified by SMC Calling convention. - */ -#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 -#define OPTEE_SMC_CALLS_COUNT \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_TRUSTED_OS_END, \ - OPTEE_SMC_FUNCID_CALLS_COUNT) - -/* - * Normal cached memory (write-back), shareable for SMP systems and not - * shareable for UP systems. - */ -#define OPTEE_SMC_SHM_CACHED 1 - -/* - * a0..a7 is used as register names in the descriptions below, on arm32 - * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's - * 32-bit registers. - */ - -/* - * Function specified by SMC Calling convention - * - * Return the following UID if using API specified in this file - * without further extensions: - * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. - * see also OPTEE_MSG_UID_* in optee_msg.h - */ -#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID -#define OPTEE_SMC_CALLS_UID \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_TRUSTED_OS_END, \ - OPTEE_SMC_FUNCID_CALLS_UID) - -/* - * Function specified by SMC Calling convention - * - * Returns 2.0 if using API specified in this file without further extensions. - * see also OPTEE_MSG_REVISION_* in optee_msg.h - */ -#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION -#define OPTEE_SMC_CALLS_REVISION \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_CONV_32, \ - ARM_SMCCC_OWNER_TRUSTED_OS_END, \ - OPTEE_SMC_FUNCID_CALLS_REVISION) - -/* - * Get UUID of Trusted OS. - * - * Used by non-secure world to figure out which Trusted OS is installed. - * Note that returned UUID is the UUID of the Trusted OS, not of the API. - * - * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID - * described above. - */ -#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID -#define OPTEE_SMC_CALL_GET_OS_UUID \ - OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) - -/* - * Get revision of Trusted OS. - * - * Used by non-secure world to figure out which version of the Trusted OS - * is installed. Note that the returned revision is the revision of the - * Trusted OS, not of the API. - * - * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION - * described above. May optionally return a 32-bit build identifier in a2, - * with zero meaning unspecified. 
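An illustrative sketch (not from the original header): issuing the UID query and checking the reassembled identifier, assuming a Linux-style arm_smccc_smc() wrapper that returns a0-a3 in struct arm_smccc_res.

    struct arm_smccc_res res;

    arm_smccc_smc(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

    /* res.a0..a3 now hold OPTEE_MSG_UID_0..OPTEE_MSG_UID_3. */
    if ( res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
         res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3 )
        /* the API described in this file is available */;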
- */
-#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
-#define OPTEE_SMC_CALL_GET_OS_REVISION \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
-
-/*
- * Call with struct optee_msg_arg as argument
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
- * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
- * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
- * a3 Cache settings, not used if physical pointer is in a predefined shared
- *    memory area else per OPTEE_SMC_SHM_*
- * a4-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Normal return register usage:
- * a0 Return value, OPTEE_SMC_RETURN_*
- * a1-3 Not used
- * a4-7 Preserved
- *
- * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
- * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
- * a1-3 Preserved
- * a4-7 Preserved
- *
- * RPC return register usage:
- * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
- * a1-2 RPC parameters
- * a3-7 Resume information, must be preserved
- *
- * Possible return values:
- * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
- *                                   function.
- * OPTEE_SMC_RETURN_OK               Call completed, result updated in
- *                                   the previously supplied struct
- *                                   optee_msg_arg.
- * OPTEE_SMC_RETURN_ETHREAD_LIMIT    Number of Trusted OS threads exceeded,
- *                                   try again later.
- * OPTEE_SMC_RETURN_EBADADDR         Bad physical pointer to struct
- *                                   optee_msg_arg.
- * OPTEE_SMC_RETURN_EBADCMD          Bad/unknown cmd in struct optee_msg_arg
- * OPTEE_SMC_RETURN_IS_RPC()         Call suspended by RPC call to normal
- *                                   world.
- */
-#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
-#define OPTEE_SMC_CALL_WITH_ARG \
-    OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
-
-/*
- * Get Shared Memory Config
- *
- * Returns the Secure/Non-secure shared memory config.
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
- * a1-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Have config return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1 Physical address of start of SHM
- * a2 Size of SHM
- * a3 Cache settings of memory, as defined by the
- *    OPTEE_SMC_SHM_* values above
- * a4-7 Preserved
- *
- * Not available register usage:
- * a0 OPTEE_SMC_RETURN_ENOTAVAIL
- * a1-3 Not used
- * a4-7 Preserved
- */
-#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
-#define OPTEE_SMC_GET_SHM_CONFIG \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
-
-/*
- * Configures L2CC mutex
- *
- * Disables or enables usage of the L2CC mutex. Returns or sets the
- * physical address of the L2CC mutex.
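An illustrative sketch (not from the original header): issuing OPTEE_SMC_CALL_WITH_ARG per the register convention above, with the same assumed arm_smccc_smc() wrapper; arg_pa and client_id are hypothetical.

    struct arm_smccc_res res;
    uint64_t arg_pa = /* physical address of a struct optee_msg_arg */ 0;

    arm_smccc_smc(OPTEE_SMC_CALL_WITH_ARG,
                  arg_pa >> 32,            /* a1: upper 32 bits */
                  arg_pa & 0xffffffffU,    /* a2: lower 32 bits */
                  OPTEE_SMC_SHM_CACHED,    /* a3: cache settings */
                  0, 0, 0,
                  client_id,               /* a7: hypervisor client ID */
                  &res);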
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_L2CC_MUTEX
- * a1 OPTEE_SMC_L2CC_MUTEX_GET_ADDR Get physical address of mutex
- *    OPTEE_SMC_L2CC_MUTEX_SET_ADDR Set physical address of mutex
- *    OPTEE_SMC_L2CC_MUTEX_ENABLE   Enable usage of mutex
- *    OPTEE_SMC_L2CC_MUTEX_DISABLE  Disable usage of mutex
- * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, upper 32bit of a 64bit
- *    physical address of mutex
- * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_SET_ADDR, lower 32bit of a 64bit
- *    physical address of mutex
- * a3-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Have config return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1 Preserved
- * a2 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, upper 32bit of a 64bit
- *    physical address of mutex
- * a3 if a1 == OPTEE_SMC_L2CC_MUTEX_GET_ADDR, lower 32bit of a 64bit
- *    physical address of mutex
- * a3-7 Preserved
- *
- * Error return register usage:
- * a0 OPTEE_SMC_RETURN_ENOTAVAIL Physical address not available
- *    OPTEE_SMC_RETURN_EBADADDR  Bad supplied physical address
- *    OPTEE_SMC_RETURN_EBADCMD   Unsupported value in a1
- * a1-7 Preserved
- */
-#define OPTEE_SMC_L2CC_MUTEX_GET_ADDR 0
-#define OPTEE_SMC_L2CC_MUTEX_SET_ADDR 1
-#define OPTEE_SMC_L2CC_MUTEX_ENABLE 2
-#define OPTEE_SMC_L2CC_MUTEX_DISABLE 3
-#define OPTEE_SMC_FUNCID_L2CC_MUTEX 8
-#define OPTEE_SMC_L2CC_MUTEX \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_L2CC_MUTEX)
-
-/*
- * Exchanges capabilities between normal world and secure world
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
- * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
- * a2-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7 Preserved
- *
- * Error return register usage:
- * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
- * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7 Preserved
- */
-/* Normal world works as a uniprocessor system */
-#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR (1 << 0)
-/* Secure world has reserved shared memory for normal world to use */
-#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM (1 << 0)
-/* Secure world can communicate via previously unregistered shared memory */
-#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM (1 << 1)
-
-/*
- * Secure world supports commands "register/unregister shared memory",
- * secure world accepts command buffers located in any parts of non-secure RAM
- */
-#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM (1 << 2)
-
-/* Secure world supports Shared Memory with a NULL reference */
-#define OPTEE_SMC_SEC_CAP_MEMREF_NULL (1 << 4)
-
-#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
-#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
-
-/*
- * Disable and empty the cache of shared memory objects
- *
- * Secure world can cache frequently used shared memory objects, for
- * example objects used as RPC arguments. When secure world is idle this
- * function returns one shared memory reference to free. To disable the
- * cache and free all cached objects this function has to be called until
- * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
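An illustrative sketch (not from the original header) of the drain loop just described; shm_free_cookie() is hypothetical.

    struct arm_smccc_res res;

    do {
        arm_smccc_smc(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, 0, &res);

        if ( res.a0 == OPTEE_SMC_RETURN_OK )
        {
            uint64_t cookie = ((uint64_t)res.a1 << 32) | res.a2;

            shm_free_cookie(cookie);  /* hypothetical: release the object */
        }
    } while ( res.a0 == OPTEE_SMC_RETURN_OK );  /* stop on ENOTAVAIL/EBUSY */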
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
- * a1-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1 Upper 32 bits of a 64-bit Shared memory cookie
- * a2 Lower 32 bits of a 64-bit Shared memory cookie
- * a3-7 Preserved
- *
- * Cache empty return register usage:
- * a0 OPTEE_SMC_RETURN_ENOTAVAIL
- * a1-7 Preserved
- *
- * Not idle return register usage:
- * a0 OPTEE_SMC_RETURN_EBUSY
- * a1-7 Preserved
- */
-#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
-#define OPTEE_SMC_DISABLE_SHM_CACHE \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
-
-/*
- * Enable cache of shared memory objects
- *
- * Secure world can cache frequently used shared memory objects, for
- * example objects used as RPC arguments. When secure world is idle this
- * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
- * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
- * a1-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1-7 Preserved
- *
- * Not idle return register usage:
- * a0 OPTEE_SMC_RETURN_EBUSY
- * a1-7 Preserved
- */
-#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
-#define OPTEE_SMC_ENABLE_SHM_CACHE \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
-
-/*
- * Release of secondary cores
- *
- * OP-TEE in secure world is in charge of the release process of secondary
- * cores. The Rich OS issues this request to ask OP-TEE to boot up the
- * secondary cores, go through the OP-TEE per-core initialization, and then
- * switch to the Non-Secure world with the Rich OS provided entry address.
- * The secondary cores enter Non-Secure world in SVC mode, with Thumb, FIQ,
- * IRQ and Abort bits disabled.
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_BOOT_SECONDARY
- * a1 Index of secondary core to boot
- * a2 Upper 32 bits of a 64-bit Non-Secure world entry physical address
- * a3 Lower 32 bits of a 64-bit Non-Secure world entry physical address
- * a4-7 Not used
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1-7 Preserved
- *
- * Error return:
- * a0 OPTEE_SMC_RETURN_EBADCMD Core index out of range
- * a1-7 Preserved
- *
- * Not idle return register usage:
- * a0 OPTEE_SMC_RETURN_EBUSY
- * a1-7 Preserved
- */
-#define OPTEE_SMC_FUNCID_BOOT_SECONDARY 12
-#define OPTEE_SMC_BOOT_SECONDARY \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_BOOT_SECONDARY)
-
-/*
- * Inform OP-TEE about a new virtual machine
- *
- * Hypervisor issues this call during virtual machine (guest) creation.
- * OP-TEE records client id of new virtual machine and prepares
- * to receive requests from it. This call is available only if OP-TEE
- * was built with virtualization support.
- *
- * Call requests usage:
- * a0 SMC Function ID, OPTEE_SMC_VM_CREATED
- * a1 Hypervisor Client ID of newly created virtual machine
- * a2-6 Not used
- * a7 Hypervisor Client ID register.
- *    Must be 0, because only the hypervisor can issue this call
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1-7 Preserved
- *
- * Error return:
- * a0 OPTEE_SMC_RETURN_ENOTAVAIL OP-TEE has no resources for
- *                               another VM
- * a1-7 Preserved
- *
- */
-#define OPTEE_SMC_FUNCID_VM_CREATED 13
-#define OPTEE_SMC_VM_CREATED \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_CREATED)
-
-/*
- * Inform OP-TEE about shutdown of a virtual machine
- *
- * Hypervisor issues this call during virtual machine (guest) destruction.
- * OP-TEE will clean up all resources associated with this VM. This call is
- * available only if OP-TEE was built with virtualization support.
- *
- * Call requests usage:
- * a0 SMC Function ID, OPTEE_SMC_VM_DESTROYED
- * a1 Hypervisor Client ID of virtual machine being shut down
- * a2-6 Not used
- * a7 Hypervisor Client ID register. Must be 0, because only the
- *    hypervisor can issue this call
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1-7 Preserved
- *
- */
-#define OPTEE_SMC_FUNCID_VM_DESTROYED 14
-#define OPTEE_SMC_VM_DESTROYED \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_VM_DESTROYED)
-
-/*
- * Query OP-TEE about number of supported threads
- *
- * Normal World OS or Hypervisor issues this call to find out how many
- * threads OP-TEE supports. That is how many standard calls can be issued
- * in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT.
- *
- * Call requests usage:
- * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT
- * a1-6 Not used
- * a7 Hypervisor Client ID register
- *
- * Normal return register usage:
- * a0 OPTEE_SMC_RETURN_OK
- * a1 Number of threads
- * a2-7 Preserved
- *
- * Error return:
- * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented
- * a1-7 Preserved
- */
-#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15
-#define OPTEE_SMC_GET_THREAD_COUNT \
-    OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
-
-/*
- * Resume from RPC (for example after processing a foreign interrupt)
- *
- * Call register usage:
- * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
- * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
- *      OPTEE_SMC_RETURN_RPC in a0
- *
- * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
- *
- * Possible return values
- * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
- *                                   function.
- * OPTEE_SMC_RETURN_OK               Original call completed, result
- *                                   updated in the previously supplied
- *                                   struct optee_msg_arg.
- * OPTEE_SMC_RETURN_RPC              Call suspended by RPC call to normal
- *                                   world.
- * OPTEE_SMC_RETURN_ERESUME          Resume failed, the opaque resume
- *                                   information was corrupt.
- */
-#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
-#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
-    OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
-
-#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
-#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
-#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
-
-#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
-    ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
-
-#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
-
-/*
- * Allocate memory for RPC parameter passing. The memory is used to hold a
- * struct optee_msg_arg.
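An illustrative sketch (not from the original header): the standard-call/RPC resume loop implied by the macros above; handle_rpc() is a hypothetical dispatcher keyed on OPTEE_SMC_RETURN_GET_RPC_FUNC(), and the IS_RPC() predicate is defined further below.

    arm_smccc_smc(OPTEE_SMC_CALL_WITH_ARG, arg_pa >> 32, arg_pa & 0xffffffffU,
                  0, 0, 0, 0, 0, &res);

    while ( OPTEE_SMC_RETURN_IS_RPC(res.a0) )
    {
        handle_rpc(&res);  /* hypothetical: services the request in a1-2 */

        /* a1-3 carry opaque resume information and must be passed back. */
        arm_smccc_smc(OPTEE_SMC_CALL_RETURN_FROM_RPC,
                      res.a1, res.a2, res.a3, 0, 0, 0, 0, &res);
    }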
- * - * "Call" register usage: - * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC - * a1 Size in bytes of required argument memory - * a2 Not used - * a3 Resume information, must be preserved - * a4-5 Not used - * a6-7 Resume information, must be preserved - * - * "Return" register usage: - * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. - * a1 Upper 32 bits of 64-bit physical pointer to allocated - * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't - * be allocated. - * a2 Lower 32 bits of 64-bit physical pointer to allocated - * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't - * be allocated - * a3 Preserved - * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing - * the memory or doing an RPC - * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing - * the memory or doing an RPC - * a6-7 Preserved - */ -#define OPTEE_SMC_RPC_FUNC_ALLOC 0 -#define OPTEE_SMC_RETURN_RPC_ALLOC \ - OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) - -/* - * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC - * - * "Call" register usage: - * a0 This value, OPTEE_SMC_RETURN_RPC_FREE - * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this - * argument memory - * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this - * argument memory - * a3-7 Resume information, must be preserved - * - * "Return" register usage: - * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. - * a1-2 Not used - * a3-7 Preserved - */ -#define OPTEE_SMC_RPC_FUNC_FREE 2 -#define OPTEE_SMC_RETURN_RPC_FREE \ - OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) - -/* - * Deliver a foreign interrupt in normal world. - * - * "Call" register usage: - * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR - * a1-7 Resume information, must be preserved - * - * "Return" register usage: - * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. - * a1-7 Preserved - */ -#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 -#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \ - OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR) - -/* - * Do an RPC request. The supplied struct optee_msg_arg tells which - * request to do and the parameters for the request. The following fields - * are used (the rest are unused): - * - cmd the Request ID - * - ret return value of the request, filled in by normal world - * - num_params number of parameters for the request - * - params the parameters - * - param_attrs attributes of the parameters - * - * "Call" register usage: - * a0 OPTEE_SMC_RETURN_RPC_CMD - * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a - * struct optee_msg_arg, must be preserved, only the data should - * be updated - * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a - * struct optee_msg_arg, must be preserved, only the data should - * be updated - * a3-7 Resume information, must be preserved - * - * "Return" register usage: - * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
- * a1-2 Not used
- * a3-7 Preserved
- */
-#define OPTEE_SMC_RPC_FUNC_CMD 5
-#define OPTEE_SMC_RETURN_RPC_CMD \
-    OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
-
-/* Returned in a0 */
-#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
-
-/* Returned in a0 only from Trusted OS functions */
-#define OPTEE_SMC_RETURN_OK 0x0
-#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
-#define OPTEE_SMC_RETURN_EBUSY 0x2
-#define OPTEE_SMC_RETURN_ERESUME 0x3
-#define OPTEE_SMC_RETURN_EBADADDR 0x4
-#define OPTEE_SMC_RETURN_EBADCMD 0x5
-#define OPTEE_SMC_RETURN_ENOMEM 0x6
-#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
-#define OPTEE_SMC_RETURN_IS_RPC(ret) \
-    (((ret) != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) && \
-     ((((ret) & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == \
-       OPTEE_SMC_RETURN_RPC_PREFIX)))
-
-#endif /* OPTEE_SMC_H */
diff --git a/xen/include/asm-arm/tee/tee.h b/xen/include/asm-arm/tee/tee.h
deleted file mode 100644
index f483986385..0000000000
--- a/xen/include/asm-arm/tee/tee.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * xen/include/asm-arm/tee/tee.h
- *
- * Generic part of TEE mediator subsystem
- *
- * Volodymyr Babchuk
- * Copyright (c) 2018 EPAM Systems.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_TEE_TEE_H__
-#define __ARCH_ARM_TEE_TEE_H__
-
-#include
-#include
-
-#include
-
-#ifdef CONFIG_TEE
-
-struct tee_mediator_ops {
-    /*
-     * Probe for TEE. Should return true if TEE found and
-     * mediator is initialized.
-     */
-    bool (*probe)(void);
-
-    /*
-     * Called during domain construction if toolstack requests to enable
-     * TEE support, so the mediator can inform the TEE about the new
-     * guest and create its own structures for the new domain.
-     */
-    int (*domain_init)(struct domain *d);
-
-    /*
-     * Called during domain destruction to relinquish resources used
-     * by the mediator itself. This function can return -ERESTART to
-     * indicate that it has not finished its work and should be called
-     * again.
-     */
-    int (*relinquish_resources)(struct domain *d);
-
-    /* Handle SMCCC call for current domain. */
-    bool (*handle_call)(struct cpu_user_regs *regs);
-};
-
-struct tee_mediator_desc {
-    /* Printable name of the TEE. */
-    const char *name;
-
-    /* Mediator callbacks as described above. */
-    const struct tee_mediator_ops *ops;
-
-    /*
-     * ID of TEE. Corresponds to xen_arch_domainconfig.tee_type.
-     * Should be one of XEN_DOMCTL_CONFIG_TEE_xxx
-     */
-    uint16_t tee_type;
-};
-
-bool tee_handle_call(struct cpu_user_regs *regs);
-int tee_domain_init(struct domain *d, uint16_t tee_type);
-int tee_relinquish_resources(struct domain *d);
-uint16_t tee_get_type(void);
-
-#define REGISTER_TEE_MEDIATOR(_name, _namestr, _type, _ops) \
-static const struct tee_mediator_desc __tee_desc_##_name __used \
-__section(".teemediator.info") = { \
-    .name = _namestr, \
-    .ops = _ops, \
-    .tee_type = _type \
-}
-
-#else
-
-static inline bool tee_handle_call(struct cpu_user_regs *regs)
-{
-    return false;
-}
-
-static inline int tee_domain_init(struct domain *d, uint16_t tee_type)
-{
-    if ( likely(tee_type == XEN_DOMCTL_CONFIG_TEE_NONE) )
-        return 0;
-
-    return -ENODEV;
-}
-
-static inline int tee_relinquish_resources(struct domain *d)
-{
-    return 0;
-}
-
-static inline uint16_t tee_get_type(void)
-{
-    return XEN_DOMCTL_CONFIG_TEE_NONE;
-}
-
-#endif /* CONFIG_TEE */
-
-#endif /* __ARCH_ARM_TEE_TEE_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-arm/time.h b/xen/include/asm-arm/time.h
deleted file mode 100644
index 4b401c1110..0000000000
--- a/xen/include/asm-arm/time.h
+++ /dev/null
@@ -1,118 +0,0 @@
-#ifndef __ARM_TIME_H__
-#define __ARM_TIME_H__
-
-#include
-#include
-#include
-
-#define DT_MATCH_TIMER                      \
-    DT_MATCH_COMPATIBLE("arm,armv7-timer"), \
-    DT_MATCH_COMPATIBLE("arm,armv8-timer")
-
-typedef uint64_t cycles_t;
-
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- */
-#if defined(CONFIG_ARM_64)
-#define read_cntpct_enforce_ordering(val) do { \
-    uint64_t tmp, _val = (val);                \
-                                               \
-    asm volatile(                              \
-    "eor %0, %1, %1\n"                         \
-    "add %0, sp, %0\n"                         \
-    "ldr xzr, [%0]"                            \
-    : "=r" (tmp) : "r" (_val));                \
-} while (0)
-#else
-#define read_cntpct_enforce_ordering(val) do {} while (0)
-#endif
-
-static inline cycles_t read_cntpct_stable(void)
-{
-    /*
-     * ARM_WORKAROUND_858921: Cortex-A73 (all versions) counter read
-     * can return a wrong value when the counter crosses a 32bit boundary.
-     */
-    if ( !check_workaround_858921() )
-        return READ_SYSREG64(CNTPCT_EL0);
-    else
-    {
-        /*
-         * A recommended workaround for erratum 858921 is to:
-         *  1- Read twice CNTPCT.
-         *  2- Compare bit[32] of the two read values.
-         *     - If bit[32] is different, keep the old value.
-         *     - If bit[32] is the same, keep the new value.
-         */
-        cycles_t old, new;
-        old = READ_SYSREG64(CNTPCT_EL0);
-        new = READ_SYSREG64(CNTPCT_EL0);
-        return (((old ^ new) >> 32) & 1) ? old : new;
-    }
-}
-
-static inline cycles_t get_cycles(void)
-{
-    cycles_t cnt;
-
-    isb();
-    cnt = read_cntpct_stable();
-
-    /*
-     * Without a barrier here, get_cycles() could be speculated past a
-     * subsequent seqlock critical section if it is ever used in one.
-     * To prevent such silent speculation we add a barrier defensively.
-     * An ISB would be sufficient, but read_cntpct_enforce_ordering()
-     * achieves the same ordering at a lower cost, so we prefer it.
-     */
-    read_cntpct_enforce_ordering(cnt);
-
-    return cnt;
-}
-
-/* List of timer IRQs */
-enum timer_ppi
-{
-    TIMER_PHYS_SECURE_PPI = 0,
-    TIMER_PHYS_NONSECURE_PPI = 1,
-    TIMER_VIRT_PPI = 2,
-    TIMER_HYP_PPI = 3,
-    MAX_TIMER_PPI = 4,
-};
-
-/*
- * Value of "clock-frequency" in the DT timer node if present.
- * 0 means the property doesn't exist.
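Returning to REGISTER_TEE_MEDIATOR above, an illustrative sketch (not from the original header) of a mediator registration; apart from the macro, the ops structure and the XEN_DOMCTL_CONFIG_TEE_xxx naming, every identifier here is hypothetical.

    static const struct tee_mediator_ops dummy_ops = {
        .probe = dummy_probe,
        .domain_init = dummy_domain_init,
        .relinquish_resources = dummy_relinquish_resources,
        .handle_call = dummy_handle_call,
    };

    /* The type argument would be one of XEN_DOMCTL_CONFIG_TEE_xxx. */
    REGISTER_TEE_MEDIATOR(dummy, "DUMMY", XEN_DOMCTL_CONFIG_TEE_DUMMY,
                          &dummy_ops);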
- */ -extern uint32_t timer_dt_clock_frequency; - -/* Get one of the timer IRQ number */ -unsigned int timer_get_irq(enum timer_ppi ppi); - -/* Set up the timer interrupt on this CPU */ -extern void init_timer_interrupt(void); - -/* Counter value at boot time */ -extern uint64_t boot_count; - -extern s_time_t ticks_to_ns(uint64_t ticks); -extern uint64_t ns_to_ticks(s_time_t ns); - -void preinit_xen_time(void); - -void force_update_vcpu_system_time(struct vcpu *v); - -#endif /* __ARM_TIME_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/trace.h b/xen/include/asm-arm/trace.h deleted file mode 100644 index e06def61f6..0000000000 --- a/xen/include/asm-arm/trace.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_TRACE_H__ -#define __ASM_TRACE_H__ - -#endif /* __ASM_TRACE_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/traps.h b/xen/include/asm-arm/traps.h deleted file mode 100644 index 2ed2b85c6f..0000000000 --- a/xen/include/asm-arm/traps.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef __ASM_ARM_TRAPS__ -#define __ASM_ARM_TRAPS__ - -#include -#include - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#endif - -/* - * GUEST_BUG_ON is intended for checking that the guest state has not been - * corrupted in hardware and/or that the hardware behaves as we - * believe it should (i.e. that certain traps can only occur when the - * guest is in a particular mode). - * - * The intention is to limit the damage such h/w bugs (or spec - * misunderstandings) can do by turning them into Denial of Service - * attacks instead of e.g. information leaks or privilege escalations. - * - * GUEST_BUG_ON *MUST* *NOT* be used to check for guest controllable state! - * - * Compared with regular BUG_ON it dumps the guest vcpu state instead - * of Xen's state. - */ -#define guest_bug_on_failed(p) \ -do { \ - show_execution_state(guest_cpu_user_regs()); \ - panic("Guest Bug: %pv: '%s', line %d, file %s\n", \ - current, p, __LINE__, __FILE__); \ -} while (0) -#define GUEST_BUG_ON(p) \ - do { if ( unlikely(p) ) guest_bug_on_failed(#p); } while (0) - -int check_conditional_instr(struct cpu_user_regs *regs, const union hsr hsr); - -void advance_pc(struct cpu_user_regs *regs, const union hsr hsr); - -void inject_undef_exception(struct cpu_user_regs *regs, const union hsr hsr); - -/* read as zero and write ignore */ -void handle_raz_wi(struct cpu_user_regs *regs, int regidx, bool read, - const union hsr hsr, int min_el); - -/* write only as write ignore */ -void handle_wo_wi(struct cpu_user_regs *regs, int regidx, bool read, - const union hsr hsr, int min_el); - -/* read only as read as zero */ -void handle_ro_raz(struct cpu_user_regs *regs, int regidx, bool read, - const union hsr hsr, int min_el); - -/* Read only as value provided with 'val' argument */ -void handle_ro_read_val(struct cpu_user_regs *regs, int regidx, bool read, - const union hsr hsr, int min_el, register_t val); - -/* Co-processor registers emulation (see arch/arm/vcpreg.c). 
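As an aside on the time interfaces above, an illustrative sketch (not from the original header) of deriving a nanosecond timestamp; the function name is hypothetical, the helpers are the ones declared above.

    static inline s_time_t since_boot_ns_sketch(void)
    {
        /* Counter ticks elapsed since boot, converted to nanoseconds. */
        return ticks_to_ns(get_cycles() - boot_count);
    }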
*/ -void do_cp15_32(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp15_64(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp14_32(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp14_64(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp14_dbg(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp10(struct cpu_user_regs *regs, const union hsr hsr); -void do_cp(struct cpu_user_regs *regs, const union hsr hsr); - -/* SMCCC handling */ -void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr); -void do_trap_hvc_smccc(struct cpu_user_regs *regs); - -int do_bug_frame(const struct cpu_user_regs *regs, vaddr_t pc); - -void noreturn do_unexpected_trap(const char *msg, - const struct cpu_user_regs *regs); - -/* Functions for pending virtual abort checking window. */ -void abort_guest_exit_start(void); -void abort_guest_exit_end(void); - -static inline bool VABORT_GEN_BY_GUEST(const struct cpu_user_regs *regs) -{ - return ((unsigned long)abort_guest_exit_start == regs->pc) || - (unsigned long)abort_guest_exit_end == regs->pc; -} - -/* Check whether the sign extension is required and perform it */ -static inline register_t sign_extend(const struct hsr_dabt dabt, register_t r) -{ - uint8_t size = (1 << dabt.size) * 8; - - /* - * Sign extend if required. - * Note that we expect the read handler to have zeroed the bits - * outside the requested access size. - */ - if ( dabt.sign && (size < sizeof(register_t) * 8) && - (r & (1UL << (size - 1))) ) - { - /* - * We are relying on register_t using the same as - * an unsigned long in order to keep the 32-bit assembly - * code smaller. - */ - BUILD_BUG_ON(sizeof(register_t) != sizeof(unsigned long)); - r |= (~0UL) << size; - } - - return r; -} - -#endif /* __ASM_ARM_TRAPS__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ - diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h deleted file mode 100644 index 083acbd151..0000000000 --- a/xen/include/asm-arm/types.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef __ARM_TYPES_H__ -#define __ARM_TYPES_H__ - -#ifndef __ASSEMBLY__ - - -typedef __signed__ char __s8; -typedef unsigned char __u8; - -typedef __signed__ short __s16; -typedef unsigned short __u16; - -typedef __signed__ int __s32; -typedef unsigned int __u32; - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) -#if defined(CONFIG_ARM_32) -typedef __signed__ long long __s64; -typedef unsigned long long __u64; -#elif defined (CONFIG_ARM_64) -typedef __signed__ long __s64; -typedef unsigned long __u64; -#endif -#endif - -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -#if defined(CONFIG_ARM_32) -typedef signed long long s64; -typedef unsigned long long u64; -typedef u32 vaddr_t; -#define PRIvaddr PRIx32 -typedef u64 paddr_t; -#define INVALID_PADDR (~0ULL) -#define PRIpaddr "016llx" -typedef u32 register_t; -#define PRIregister "08x" -#elif defined (CONFIG_ARM_64) -typedef signed long s64; -typedef unsigned long u64; -typedef u64 vaddr_t; -#define PRIvaddr PRIx64 -typedef u64 paddr_t; -#define INVALID_PADDR (~0UL) -#define PRIpaddr "016lx" -typedef u64 register_t; -#define PRIregister "016lx" -#endif - -#if defined(__SIZE_TYPE__) -typedef __SIZE_TYPE__ size_t; -#else -typedef unsigned long size_t; -#endif -typedef signed long ssize_t; - -#if defined(__PTRDIFF_TYPE__) -typedef __PTRDIFF_TYPE__ 
ptrdiff_t; -#else -typedef signed long ptrdiff_t; -#endif - -#endif /* __ASSEMBLY__ */ - -#endif /* __ARM_TYPES_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vfp.h b/xen/include/asm-arm/vfp.h deleted file mode 100644 index 142a91ef8b..0000000000 --- a/xen/include/asm-arm/vfp.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ASM_VFP_H -#define _ASM_VFP_H - -struct vcpu; - -#if defined(CONFIG_ARM_32) -# include -#elif defined(CONFIG_ARM_64) -# include -#else -# error "Unknown ARM variant" -#endif - -void vfp_save_state(struct vcpu *v); -void vfp_restore_state(struct vcpu *v); - -#endif /* _ASM_VFP_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vgic-emul.h b/xen/include/asm-arm/vgic-emul.h deleted file mode 100644 index e52fbaa3ec..0000000000 --- a/xen/include/asm-arm/vgic-emul.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __ASM_ARM_VGIC_EMUL_H__ -#define __ASM_ARM_VGIC_EMUL_H__ - -/* - * Helpers to create easily a case to match emulate a single register or - * a range of registers - */ - -#define VREG32(reg) reg ... reg + 3 -#define VREG64(reg) reg ... reg + 7 - -#define VRANGE32(start, end) start ... end + 3 -#define VRANGE64(start, end) start ... end + 7 - -/* - * 64 bits registers can be accessible using 32-bit and 64-bit unless - * stated otherwise (See 8.1.3 ARM IHI 0069A). - */ -static inline bool vgic_reg64_check_access(struct hsr_dabt dabt) -{ - return ( dabt.size == DABT_DOUBLE_WORD || dabt.size == DABT_WORD ); -} - -#endif /* __ASM_ARM_VGIC_EMUL_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h deleted file mode 100644 index ade427a808..0000000000 --- a/xen/include/asm-arm/vgic.h +++ /dev/null @@ -1,383 +0,0 @@ -/* - * ARM Virtual Generic Interrupt Controller support - * - * Ian Campbell - * Copyright (c) 2011 Citrix Systems. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_ARM_VGIC_H__ -#define __ASM_ARM_VGIC_H__ - -#ifdef CONFIG_NEW_VGIC -#include -#else - -#include -#include - -struct pending_irq -{ - /* - * The following two states track the lifecycle of the guest irq. - * However because we are not sure and we don't want to track - * whether an irq added to an LR register is PENDING or ACTIVE, the - * following states are just an approximation. - * - * GIC_IRQ_GUEST_QUEUED: the irq is asserted and queued for - * injection into the guest's LRs. - * - * GIC_IRQ_GUEST_VISIBLE: the irq has been added to an LR register, - * therefore the guest is aware of it. From the guest point of view - * the irq can be pending (if the guest has not acked the irq yet) - * or active (after acking the irq). 
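An illustrative sketch (not from the original header) of the case-range style that the VREG32/VRANGE32 helpers above enable inside an MMIO emulation switch; the offsets are made up and info is assumed to be the usual mmio_info_t.

    switch ( info->gpa & 0xffff )
    {
    case VREG32(0x000):           /* one 32-bit register at offset 0x000 */
        /* emulate a single register */
        break;

    case VRANGE32(0x080, 0x0fc):  /* a bank of 32-bit registers */
        /* emulate the whole range */
        break;
    }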
- * - * In order for the state machine to be fully accurate, for level - * interrupts, we should keep the interrupt's pending state until - * the guest deactivates the irq. However because we are not sure - * when that happens, we instead track whether there is an interrupt - * queued using GIC_IRQ_GUEST_QUEUED. We clear it when we add it to - * an LR register. We set it when we receive another interrupt - * notification. Therefore it is possible to set - * GIC_IRQ_GUEST_QUEUED while the irq is GIC_IRQ_GUEST_VISIBLE. We - * could also change the state of the guest irq in the LR register - * from active to active and pending, but for simplicity we simply - * inject a second irq after the guest EOIs the first one. - * - * - * An additional state is used to keep track of whether the guest - * irq is enabled at the vgicd level: - * - * GIC_IRQ_GUEST_ENABLED: the guest IRQ is enabled at the VGICD - * level (GICD_ICENABLER/GICD_ISENABLER). - * - * GIC_IRQ_GUEST_MIGRATING: the irq is being migrated to a different - * vcpu while it is still inflight and on an GICH_LR register on the - * old vcpu. - * - * GIC_IRQ_GUEST_PRISTINE_LPI: the IRQ is a newly mapped LPI, which - * has never been in an LR before. This means that any trace of an - * LPI with the same number in an LR must be from an older LPI, which - * has been unmapped before. - * - */ -#define GIC_IRQ_GUEST_QUEUED 0 -#define GIC_IRQ_GUEST_ACTIVE 1 -#define GIC_IRQ_GUEST_VISIBLE 2 -#define GIC_IRQ_GUEST_ENABLED 3 -#define GIC_IRQ_GUEST_MIGRATING 4 -#define GIC_IRQ_GUEST_PRISTINE_LPI 5 - unsigned long status; - struct irq_desc *desc; /* only set if the irq corresponds to a physical irq */ - unsigned int irq; -#define GIC_INVALID_LR (uint8_t)~0 - uint8_t lr; - uint8_t priority; - uint8_t lpi_priority; /* Caches the priority if this is an LPI. */ - uint8_t lpi_vcpu_id; /* The VCPU for an LPI. */ - /* inflight is used to append instances of pending_irq to - * vgic.inflight_irqs */ - struct list_head inflight; - /* lr_queue is used to append instances of pending_irq to - * lr_pending. lr_pending is a per vcpu queue, therefore lr_queue - * accesses are protected with the vgic lock. - * TODO: when implementing irq migration, taking only the current - * vgic lock is not going to be enough. */ - struct list_head lr_queue; -}; - -#define NR_INTERRUPT_PER_RANK 32 -#define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1) - -/* Represents state corresponding to a block of 32 interrupts */ -struct vgic_irq_rank { - spinlock_t lock; /* Covers access to all other members of this struct */ - - uint8_t index; - - uint32_t ienable; - uint32_t icfg[2]; - - /* - * Provide efficient access to the priority of an vIRQ while keeping - * the emulation simple. - * Note, this is working fine as long as Xen is using little endian. - */ - union { - uint8_t priority[32]; - uint32_t ipriorityr[8]; - }; - - /* - * It's more convenient to store a target VCPU per vIRQ - * than the register ITARGETSR/IROUTER itself. - * Use atomic operations to read/write the vcpu fields to avoid - * taking the rank lock. - */ - uint8_t vcpu[32]; -}; - -struct vgic_dist { - /* Version of the vGIC */ - enum gic_version version; - /* GIC HW version specific vGIC driver handler */ - const struct vgic_ops *handler; - /* - * Covers access to other members of this struct _except_ for - * shared_irqs where each member contains its own locking. - * - * If both class of lock is required then this lock must be - * taken first. 
If multiple rank locks are required (including - * the per-vcpu private_irqs rank) then they must be taken in - * rank order. - */ - spinlock_t lock; - uint32_t ctlr; - int nr_spis; /* Number of SPIs */ - unsigned long *allocated_irqs; /* bitmap of IRQs allocated */ - struct vgic_irq_rank *shared_irqs; - /* - * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in - * struct arch_vcpu. - */ - struct pending_irq *pending_irqs; - /* Base address for guest GIC */ - paddr_t dbase; /* Distributor base address */ -#ifdef CONFIG_GICV3 - /* GIC V3 addressing */ - /* List of contiguous occupied by the redistributors */ - struct vgic_rdist_region { - paddr_t base; /* Base address */ - paddr_t size; /* Size */ - unsigned int first_cpu; /* First CPU handled */ - } *rdist_regions; - int nr_regions; /* Number of rdist regions */ - unsigned long int nr_lpis; - uint64_t rdist_propbase; - struct rb_root its_devices; /* Devices mapped to an ITS */ - spinlock_t its_devices_lock; /* Protects the its_devices tree */ - struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */ - rwlock_t pend_lpi_tree_lock; /* Protects the pend_lpi_tree */ - struct list_head vits_list; /* List of virtual ITSes */ - unsigned int intid_bits; - /* - * TODO: if there are more bool's being added below, consider - * a flags variable instead. - */ - bool rdists_enabled; /* Is any redistributor enabled? */ - bool has_its; -#endif -}; - -struct vgic_cpu { - /* - * SGIs and PPIs are per-VCPU, SPIs are domain global and in - * struct arch_domain. - */ - struct pending_irq pending_irqs[32]; - struct vgic_irq_rank *private_irqs; - - /* This list is ordered by IRQ priority and it is used to keep - * track of the IRQs that the VGIC injected into the guest. - * Depending on the availability of LR registers, the IRQs might - * actually be in an LR, and therefore injected into the guest, - * or queued in gic.lr_pending. - * As soon as an IRQ is EOI'd by the guest and removed from the - * corresponding LR it is also removed from this list. */ - struct list_head inflight_irqs; - /* lr_pending is used to queue IRQs (struct pending_irq) that the - * vgic tried to inject in the guest (calling gic_raise_guest_irq) but - * no LRs were available at the time. - * As soon as an LR is freed we remove the first IRQ from this - * list and write it to the LR register. - * lr_pending is a subset of vgic.inflight_irqs. 
 */
-    struct list_head lr_pending;
-    spinlock_t lock;
-
-    /* GICv3: redistributor base and flags for this vCPU */
-    paddr_t rdist_base;
-    uint64_t rdist_pendbase;
-#define VGIC_V3_RDIST_LAST   (1 << 0)   /* last vCPU of the rdist */
-#define VGIC_V3_LPIS_ENABLED (1 << 1)
-    uint8_t flags;
-};
-
-struct sgi_target {
-    uint8_t aff1;
-    uint16_t list;
-};
-
-static inline void sgi_target_init(struct sgi_target *sgi_target)
-{
-    sgi_target->aff1 = 0;
-    sgi_target->list = 0;
-}
-
-struct vgic_ops {
-    /* Initialize vGIC */
-    int (*vcpu_init)(struct vcpu *v);
-    /* Domain specific initialization of vGIC */
-    int (*domain_init)(struct domain *d);
-    /* Release resources that were allocated by domain_init */
-    void (*domain_free)(struct domain *d);
-    /* vGIC sysreg/cpregs emulate */
-    bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
-    /* lookup the struct pending_irq for a given LPI interrupt */
-    struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
-    int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
-};
-
-/* Number of ranks of interrupt registers for a domain */
-#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)
-
-#define vgic_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.lock)
-#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
-
-#define vgic_lock_rank(v, r, flags)   spin_lock_irqsave(&(r)->lock, flags)
-#define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
-
-/*
- * Rank containing GICD_<REG><n> for GICD_<REG> with
- * <b>-bits-per-interrupt
- */
-static inline int REG_RANK_NR(int b, uint32_t n)
-{
-    switch ( b )
-    {
-    /*
-     * IRQ ranks are of size 32. So n cannot be shifted beyond 5 for 32
-     * and above. For 64-bit n is already shifted DABT_DOUBLE_WORD
-     * by the caller
-     */
-    case 64:
-    case 32: return n >> 5;
-    case 16: return n >> 4;
-    case 8: return n >> 3;
-    case 4: return n >> 2;
-    case 2: return n >> 1;
-    case 1: return n;
-    default: BUG();
-    }
-}
-
-enum gic_sgi_mode;
-
-/*
- * Offset of GICD_<REG><n> within its rank, for GICD_<REG> size <s> with
- * <b>-bits-per-interrupt.
- */
-#define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
-
-
-extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
-extern void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p);
-extern void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p);
-extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
-extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
-extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
-extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
-extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
-extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
-extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
-extern void vgic_set_irqs_pending(struct vcpu *v, uint32_t r,
-                                  unsigned int rank);
-extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
-int vgic_v2_init(struct domain *d, int *mmio_count);
-int vgic_v3_init(struct domain *d, int *mmio_count);
-
-extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
-                        enum gic_sgi_mode irqmode, int virq,
-                        const struct sgi_target *target);
-extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
-extern void vgic_check_inflight_irqs_pending(struct domain *d, struct vcpu *v,
-                                             unsigned int rank, uint32_t r);
-
-#endif /* !CONFIG_NEW_VGIC */
-
-/*** Common VGIC functions used by Xen arch code ****/
-
-/*
- * At the moment vgic_num_irqs() just covers SPIs and the private IRQs,
- * as it's mostly used for allocating the pending_irq and irq_desc array,
- * in which LPIs don't participate.
- */
-#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)
-
-/*
- * Allocate a guest VIRQ
- *  - spi == 0 => allocate a PPI. It will be the same on every vCPU
- *  - spi == 1 => allocate an SPI
- */
-extern int vgic_allocate_virq(struct domain *d, bool spi);
-/* Reserve a specific guest vIRQ */
-extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);
-extern void vgic_free_virq(struct domain *d, unsigned int virq);
-
-static inline int vgic_allocate_ppi(struct domain *d)
-{
-    return vgic_allocate_virq(d, false /* ppi */);
-}
-
-static inline int vgic_allocate_spi(struct domain *d)
-{
-    return vgic_allocate_virq(d, true /* spi */);
-}
-
-struct irq_desc *vgic_get_hw_irq_desc(struct domain *d, struct vcpu *v,
-                                      unsigned int virq);
-int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq,
-                        struct irq_desc *desc, bool connect);
-
-bool vgic_evtchn_irq_pending(struct vcpu *v);
-
-int domain_vgic_register(struct domain *d, int *mmio_count);
-int domain_vgic_init(struct domain *d, unsigned int nr_spis);
-void domain_vgic_free(struct domain *d);
-int vcpu_vgic_init(struct vcpu *vcpu);
-int vcpu_vgic_free(struct vcpu *vcpu);
-
-void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq,
-                     bool level);
-
-extern void vgic_clear_pending_irqs(struct vcpu *v);
-
-extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
-
-/* Maximum vCPUs for a specific vGIC version, or 0 for unsupported.
*/ -unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version); - -void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize, - paddr_t vbase, uint32_t aliased_offset); - -#ifdef CONFIG_GICV3 -struct rdist_region; -void vgic_v3_setup_hw(paddr_t dbase, - unsigned int nr_rdist_regions, - const struct rdist_region *regions, - unsigned int intid_bits); -#endif - -void vgic_sync_to_lrs(void); -void vgic_sync_from_lrs(struct vcpu *v); - -int vgic_vcpu_pending_irq(struct vcpu *v); - -#endif /* __ASM_ARM_VGIC_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h deleted file mode 100644 index abe7db1970..0000000000 --- a/xen/include/asm-arm/vm_event.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * vm_event.h: architecture specific vm_event handling routines - * - * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_ARM_VM_EVENT_H__ -#define __ASM_ARM_VM_EVENT_H__ - -#include -#include -#include - -static inline int vm_event_init_domain(struct domain *d) -{ - /* Nothing to do. */ - return 0; -} - -static inline void vm_event_cleanup_domain(struct domain *d) -{ - memset(&d->monitor, 0, sizeof(d->monitor)); -} - -static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v, - vm_event_response_t *rsp) -{ - /* Not supported on ARM. */ -} - -static inline -void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp) -{ - /* Not supported on ARM. */ -} - -static inline -void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp) -{ - /* Not supported on ARM. */ -} - -static inline -void vm_event_sync_event(struct vcpu *v, bool value) -{ - /* Not supported on ARM. */ -} - -static inline -void vm_event_reset_vmtrace(struct vcpu *v) -{ - /* Not supported on ARM. */ -} - -#endif /* __ASM_ARM_VM_EVENT_H__ */ diff --git a/xen/include/asm-arm/vpl011.h b/xen/include/asm-arm/vpl011.h deleted file mode 100644 index e6c7ab7381..0000000000 --- a/xen/include/asm-arm/vpl011.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * include/xen/vpl011.h - * - * Virtual PL011 UART - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef _VPL011_H_ -#define _VPL011_H_ - -#include -#include -#include -#include - -/* helper macros */ -#define VPL011_LOCK(d,flags) spin_lock_irqsave(&(d)->arch.vpl011.lock, flags) -#define VPL011_UNLOCK(d,flags) spin_unlock_irqrestore(&(d)->arch.vpl011.lock, flags) - -#define SBSA_UART_FIFO_SIZE 32 -/* Same size as VUART_BUF_SIZE, used in vuart.c */ -#define SBSA_UART_OUT_BUF_SIZE 128 -struct vpl011_xen_backend { - char in[SBSA_UART_FIFO_SIZE]; - char out[SBSA_UART_OUT_BUF_SIZE]; - XENCONS_RING_IDX in_cons, in_prod; - XENCONS_RING_IDX out_prod; -}; - -struct vpl011 { - bool backend_in_domain; - union { - struct { - void *ring_buf; - struct page_info *ring_page; - } dom; - struct vpl011_xen_backend *xen; - } backend; - uint32_t uartfr; /* Flag register */ - uint32_t uartcr; /* Control register */ - uint32_t uartimsc; /* Interrupt mask register*/ - uint32_t uarticr; /* Interrupt clear register */ - uint32_t uartris; /* Raw interrupt status register */ - uint32_t shadow_uartmis; /* shadow masked interrupt register */ - spinlock_t lock; - evtchn_port_t evtchn; -}; - -struct vpl011_init_info { - domid_t console_domid; - gfn_t gfn; - evtchn_port_t evtchn; -}; - -#ifdef CONFIG_SBSA_VUART_CONSOLE -int domain_vpl011_init(struct domain *d, - struct vpl011_init_info *info); -void domain_vpl011_deinit(struct domain *d); -void vpl011_rx_char_xen(struct domain *d, char c); -#else -static inline int domain_vpl011_init(struct domain *d, - struct vpl011_init_info *info) -{ - return -ENOSYS; -} - -static inline void domain_vpl011_deinit(struct domain *d) { } -#endif -#endif /* _VPL011_H_ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vpsci.h b/xen/include/asm-arm/vpsci.h deleted file mode 100644 index 0cca5e6830..0000000000 --- a/xen/include/asm-arm/vpsci.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * xen/include/asm-arm/vpsci.h - * - * Julien Grall - * Copyright (c) 2018 Linaro Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; under version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . 
- */ - -#ifndef __ASM_VPSCI_H__ -#define __ASM_VPSCI_H__ - -#include - -/* Number of function implemented by virtual PSCI (only 0.2 or later) */ -#define VPSCI_NR_FUNCS 12 - -/* Functions handle PSCI calls from the guests */ -bool do_vpsci_0_1_call(struct cpu_user_regs *regs, uint32_t fid); -bool do_vpsci_0_2_call(struct cpu_user_regs *regs, uint32_t fid); - -#endif /* __ASM_VPSCI_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/vreg.h b/xen/include/asm-arm/vreg.h deleted file mode 100644 index fa2f4cdb17..0000000000 --- a/xen/include/asm-arm/vreg.h +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Helpers to emulate co-processor and system registers - */ -#ifndef __ASM_ARM_VREG__ -#define __ASM_ARM_VREG__ - -typedef bool (*vreg_reg64_fn_t)(struct cpu_user_regs *regs, uint64_t *r, - bool read); -typedef bool (*vreg_reg_fn_t)(struct cpu_user_regs *regs, register_t *r, - bool read); - -static inline bool vreg_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr, - vreg_reg_fn_t fn) -{ - struct hsr_cp32 cp32 = hsr.cp32; - /* - * Initialize to zero to avoid leaking data if there is an - * implementation error in the emulation (such as not correctly - * setting r). - */ - register_t r = 0; - bool ret; - - if ( !cp32.read ) - r = get_user_reg(regs, cp32.reg); - - ret = fn(regs, &r, cp32.read); - - if ( ret && cp32.read ) - set_user_reg(regs, cp32.reg, r); - - return ret; -} - -static inline bool vreg_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr, - vreg_reg64_fn_t fn) -{ - struct hsr_cp64 cp64 = hsr.cp64; - /* - * Initialize to zero to avoid leaking data if there is an - * implementation error in the emulation (such as not correctly - * setting x). - */ - uint64_t x = 0; - bool ret; - - if ( !cp64.read ) - { - uint32_t r1 = get_user_reg(regs, cp64.reg1); - uint32_t r2 = get_user_reg(regs, cp64.reg2); - - x = (uint64_t)r1 | ((uint64_t)r2 << 32); - } - - ret = fn(regs, &x, cp64.read); - - if ( ret && cp64.read ) - { - set_user_reg(regs, cp64.reg1, x & 0xffffffff); - set_user_reg(regs, cp64.reg2, x >> 32); - } - - return ret; -} - -#ifdef CONFIG_ARM_64 -static inline bool vreg_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr, - vreg_reg_fn_t fn) -{ - struct hsr_sysreg sysreg = hsr.sysreg; - register_t r = 0; - bool ret; - - if ( !sysreg.read ) - r = get_user_reg(regs, sysreg.reg); - - ret = fn(regs, &r, sysreg.read); - - if ( ret && sysreg.read ) - set_user_reg(regs, sysreg.reg, r); - - return ret; -} -#endif - -#define VREG_REG_MASK(size) ((~0UL) >> (BITS_PER_LONG - ((1 << (size)) * 8))) - -/* - * The check on the size supported by the register has to be done by - * the caller of vreg_regN_*. - * - * vreg_reg_* should never be called directly. Instead use the vreg_regN_* - * according to size of the emulated register - * - * Note that the alignment fault will always be taken in the guest - * (see B3.12.7 DDI0406.b). 
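vreg_emulate_cp32() above expects a vreg_reg_fn_t callback: on a guest write the callback receives the value in *r, and on a guest read it must fill *r in, which the wrapper then stores back to the trapping register. A hedged sketch of such a handler (the emulated register and its backing store are invented for illustration):

    static register_t example_reg;   /* invented backing store */

    static bool vreg_handle_example(struct cpu_user_regs *regs,
                                    register_t *r, bool read)
    {
        if ( read )
            *r = example_reg;    /* guest read: supply the current value */
        else
            example_reg = *r;    /* guest write: latch the written value */

        return true;             /* emulation handled successfully */
    }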
- */ -static inline register_t vreg_reg_extract(unsigned long reg, - unsigned int offset, - enum dabt_size size) -{ - reg >>= 8 * offset; - reg &= VREG_REG_MASK(size); - - return reg; -} - -static inline void vreg_reg_update(unsigned long *reg, register_t val, - unsigned int offset, - enum dabt_size size) -{ - unsigned long mask = VREG_REG_MASK(size); - int shift = offset * 8; - - *reg &= ~(mask << shift); - *reg |= ((unsigned long)val & mask) << shift; -} - -static inline void vreg_reg_setbits(unsigned long *reg, register_t bits, - unsigned int offset, - enum dabt_size size) -{ - unsigned long mask = VREG_REG_MASK(size); - int shift = offset * 8; - - *reg |= ((unsigned long)bits & mask) << shift; -} - -static inline void vreg_reg_clearbits(unsigned long *reg, register_t bits, - unsigned int offset, - enum dabt_size size) -{ - unsigned long mask = VREG_REG_MASK(size); - int shift = offset * 8; - - *reg &= ~(((unsigned long)bits & mask) << shift); -} - -/* N-bit register helpers */ -#define VREG_REG_HELPERS(sz, offmask) \ -static inline register_t vreg_reg##sz##_extract(uint##sz##_t reg, \ - const mmio_info_t *info)\ -{ \ - return vreg_reg_extract(reg, info->gpa & offmask, \ - info->dabt.size); \ -} \ - \ -static inline void vreg_reg##sz##_update(uint##sz##_t *reg, \ - register_t val, \ - const mmio_info_t *info) \ -{ \ - unsigned long tmp = *reg; \ - \ - vreg_reg_update(&tmp, val, info->gpa & offmask, \ - info->dabt.size); \ - \ - *reg = tmp; \ -} \ - \ -static inline void vreg_reg##sz##_setbits(uint##sz##_t *reg, \ - register_t bits, \ - const mmio_info_t *info) \ -{ \ - unsigned long tmp = *reg; \ - \ - vreg_reg_setbits(&tmp, bits, info->gpa & offmask, \ - info->dabt.size); \ - \ - *reg = tmp; \ -} \ - \ -static inline void vreg_reg##sz##_clearbits(uint##sz##_t *reg, \ - register_t bits, \ - const mmio_info_t *info) \ -{ \ - unsigned long tmp = *reg; \ - \ - vreg_reg_clearbits(&tmp, bits, info->gpa & offmask, \ - info->dabt.size); \ - \ - *reg = tmp; \ -} - -/* - * 64 bits registers are only supported on platform with 64-bit long. - * This is also allow us to optimize the 32 bit case by using - * unsigned long rather than uint64_t - */ -#if BITS_PER_LONG == 64 -VREG_REG_HELPERS(64, 0x7); -#endif -VREG_REG_HELPERS(32, 0x3); - -#undef VREG_REG_HELPERS - -#endif /* __ASM_ARM_VREG__ */ diff --git a/xen/include/asm-arm/vtimer.h b/xen/include/asm-arm/vtimer.h deleted file mode 100644 index 9d4fb4c6e8..0000000000 --- a/xen/include/asm-arm/vtimer.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * xen/arch/arm/vtimer.h - * - * ARM Virtual Timer emulation support - * - * Ian Campbell - * Copyright (c) 2011 Citrix Systems. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
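Concretely, assuming BITS_PER_LONG == 64 and the usual dabt size encoding (the size field is log2 of the access width in bytes, so 2 means a 32-bit access), the mask/shift arithmetic above works out as:

    /* VREG_REG_MASK(2) == ~0UL >> (64 - (1 << 2) * 8) == 0xffffffff */
    uint64_t reg = 0x1122334455667788ULL;

    /* 32-bit read at byte offset 4 of the 64-bit register: */
    register_t val = (reg >> (8 * 4)) & 0xffffffffUL;   /* == 0x11223344 */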
- */ - -#ifndef __ARCH_ARM_VTIMER_H__ -#define __ARCH_ARM_VTIMER_H__ - -extern int domain_vtimer_init(struct domain *d, - struct xen_arch_domainconfig *config); -extern int vcpu_vtimer_init(struct vcpu *v); -extern bool vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr); -extern void virt_timer_save(struct vcpu *v); -extern void virt_timer_restore(struct vcpu *v); -extern void vcpu_timer_destroy(struct vcpu *v); -void vtimer_update_irqs(struct vcpu *v); - -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-arm/xenoprof.h b/xen/include/asm-arm/xenoprof.h deleted file mode 100644 index 3db6ce3ab2..0000000000 --- a/xen/include/asm-arm/xenoprof.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_XENOPROF_H__ -#define __ASM_XENOPROF_H__ - -#endif /* __ASM_XENOPROF_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-riscv/config.h b/xen/include/asm-riscv/config.h deleted file mode 100644 index e2ae21de61..0000000000 --- a/xen/include/asm-riscv/config.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef __RISCV_CONFIG_H__ -#define __RISCV_CONFIG_H__ - -#if defined(CONFIG_RISCV_64) -# define LONG_BYTEORDER 3 -# define ELFSIZE 64 -# define MAX_VIRT_CPUS 128u -#else -# error "Unsupported RISCV variant" -#endif - -#define BYTES_PER_LONG (1 << LONG_BYTEORDER) -#define BITS_PER_LONG (BYTES_PER_LONG << 3) -#define POINTER_ALIGN BYTES_PER_LONG - -#define BITS_PER_LLONG 64 - -/* xen_ulong_t is always 64 bits */ -#define BITS_PER_XEN_ULONG 64 - -#define CONFIG_RISCV_L1_CACHE_SHIFT 6 -#define CONFIG_PAGEALLOC_MAX_ORDER 18 -#define CONFIG_DOMU_MAX_ORDER 9 -#define CONFIG_HWDOM_MAX_ORDER 10 - -#define OPT_CONSOLE_STR "dtuart" -#define INVALID_VCPU_ID MAX_VIRT_CPUS - -/* Linkage for RISCV */ -#ifdef __ASSEMBLY__ -#define ALIGN .align 2 - -#define ENTRY(name) \ - .globl name; \ - ALIGN; \ - name: -#endif - -#endif /* __RISCV_CONFIG_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h deleted file mode 100644 index 9a9cc4c240..0000000000 --- a/xen/include/asm-x86/acpi.h +++ /dev/null @@ -1,162 +0,0 @@ -#ifndef _ASM_X86_ACPI_H -#define _ASM_X86_ACPI_H - -/* - * Copyright (C) 2001 Paul Diefenbaugh - * Copyright (C) 2001 Patrick Mochel - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . 
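The derived constants in the RISC-V config above follow directly from LONG_BYTEORDER being the log2 byte count of a long. Spelled out, using Xen's BUILD_BUG_ON inside any function body:

    /* With CONFIG_RISCV_64, LONG_BYTEORDER is 3, so: */
    BUILD_BUG_ON(BYTES_PER_LONG != (1 << 3));       /* 8 bytes */
    BUILD_BUG_ON(BITS_PER_LONG  != (8 << 3));       /* 64 bits */
    BUILD_BUG_ON(POINTER_ALIGN  != BYTES_PER_LONG); /* 8-byte alignment */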
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include -#include -#include -#include - -#define COMPILER_DEPENDENT_INT64 long long -#define COMPILER_DEPENDENT_UINT64 unsigned long long - -/* - * Calling conventions: - * - * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) - * ACPI_EXTERNAL_XFACE - External ACPI interfaces - * ACPI_INTERNAL_XFACE - Internal ACPI interfaces - * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces - */ -#define ACPI_SYSTEM_XFACE -#define ACPI_EXTERNAL_XFACE -#define ACPI_INTERNAL_XFACE -#define ACPI_INTERNAL_VAR_XFACE - -/* Asm macros */ - -#define ACPI_ASM_MACROS -#define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() -#define ACPI_FLUSH_CPU_CACHE() wbinvd() - -int __acpi_acquire_global_lock(unsigned int *lock); -int __acpi_release_global_lock(unsigned int *lock); - -#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) - -#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_release_global_lock(&facs->global_lock)) - -/* - * Math helper asm macros - */ -#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ - asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ - "0"(n_lo), "1"(n_hi)) - - -#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ - asm("shrl $1,%2 ;" \ - "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) - -extern bool acpi_lapic, acpi_ioapic, acpi_noirq; -extern bool acpi_force, acpi_ht, acpi_disabled; -extern u32 acpi_smi_cmd; -extern u8 acpi_enable_value, acpi_disable_value; -void acpi_pic_sci_set_trigger(unsigned int, u16); - -static inline void disable_acpi(void) -{ - acpi_disabled = 1; - acpi_ht = 0; - acpi_noirq = 1; -} - -static inline void acpi_noirq_set(void) { acpi_noirq = 1; } - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -extern int acpi_save_state_disk(void); -extern void acpi_restore_state_mem(void); - -extern unsigned long acpi_wakeup_address; - -#define ARCH_HAS_POWER_INIT 1 - -extern s8 acpi_numa; -extern int acpi_scan_nodes(u64 start, u64 end); -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) - -extern struct acpi_sleep_info acpi_sinfo; -#define acpi_video_flags bootsym(video_flags) -struct xenpf_enter_acpi_sleep; -extern int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep); -extern int acpi_enter_state(u32 state); - -struct acpi_sleep_info { - struct acpi_generic_address pm1a_cnt_blk; - struct acpi_generic_address pm1b_cnt_blk; - struct acpi_generic_address pm1a_evt_blk; - struct acpi_generic_address pm1b_evt_blk; - struct acpi_generic_address sleep_control; - struct acpi_generic_address sleep_status; - union { - uint16_t pm1a_cnt_val; - uint8_t sleep_type_a; - }; - union { - uint16_t pm1b_cnt_val; - uint8_t sleep_type_b; - }; - uint32_t sleep_state; - uint64_t wakeup_vector; - uint32_t vector_width; - bool_t sleep_extended; -}; - -#define MAX_MADT_ENTRIES MAX(256, 2 * NR_CPUS) -extern u32 x86_acpiid_to_apicid[]; -#define MAX_LOCAL_APIC MAX(256, 4 * NR_CPUS) - -#define INVALID_ACPIID (-1U) - -extern u32 pmtmr_ioport; -extern unsigned int pmtmr_width; - -void acpi_iommu_init(void); -int acpi_dmar_init(void); -int acpi_ivrs_init(void); - -void acpi_mmcfg_init(void); - -/* Incremented whenever we transition through S3. Value is 1 during boot. 
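ACPI_DIV_64_BY_32 above wraps a single divl: the 64-bit dividend arrives split across two 32-bit halves and the instruction produces a 32-bit quotient and remainder. A C rendition of what it computes:

    static inline void acpi_div_64_by_32(uint32_t n_hi, uint32_t n_lo,
                                         uint32_t d32,
                                         uint32_t *q32, uint32_t *r32)
    {
        uint64_t n = ((uint64_t)n_hi << 32) | n_lo;

        *q32 = n / d32;   /* divl leaves this in %eax */
        *r32 = n % d32;   /* ... and this in %edx */
    }

One difference: the real divl raises #DE if the quotient does not fit in 32 bits, whereas plain C truncates silently.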
*/ -extern uint32_t system_reset_counter; - -void hvm_acpi_power_button(struct domain *d); -void hvm_acpi_sleep_button(struct domain *d); - -/* suspend/resume */ -void save_rest_processor_state(void); -void restore_rest_processor_state(void); - -#define ACPI_MAP_MEM_ATTR PAGE_HYPERVISOR_UCMINUS - -#endif /*__X86_ASM_ACPI_H*/ diff --git a/xen/include/asm-x86/alternative-asm.h b/xen/include/asm-x86/alternative-asm.h deleted file mode 100644 index e6c42d721d..0000000000 --- a/xen/include/asm-x86/alternative-asm.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef _ASM_X86_ALTERNATIVE_ASM_H_ -#define _ASM_X86_ALTERNATIVE_ASM_H_ - -#include - -#ifdef __ASSEMBLY__ - -/* - * Issue one struct alt_instr descriptor entry (need to put it into - * the section .altinstructions, see below). This entry contains - * enough information for the alternatives patching code to patch an - * instruction. See apply_alternatives(). - */ -.macro altinstruction_entry orig repl feature orig_len repl_len pad_len - .long \orig - . - .long \repl - . - .word \feature - .byte \orig_len - .byte \repl_len - .byte \pad_len - .byte 0 /* priv */ -.endm - -.macro mknops nr_bytes -#ifdef HAVE_AS_NOPS_DIRECTIVE - .nops \nr_bytes, ASM_NOP_MAX -#else - .skip \nr_bytes, 0x90 -#endif -.endm - -/* GAS's idea of true is -1, while Clang's idea is 1. */ -#ifdef HAVE_AS_NEGATIVE_TRUE -# define as_true(x) (-(x)) -#else -# define as_true(x) (x) -#endif - -#define decl_orig(insn, padding) \ - .L\@_orig_s: insn; .L\@_orig_e: \ - .L\@_diff = padding; \ - mknops (as_true(.L\@_diff > 0) * .L\@_diff); \ - .L\@_orig_p: - -#define orig_len (.L\@_orig_e - .L\@_orig_s) -#define pad_len (.L\@_orig_p - .L\@_orig_e) -#define total_len (.L\@_orig_p - .L\@_orig_s) - -#define decl_repl(insn, nr) .L\@_repl_s\()nr: insn; .L\@_repl_e\()nr: -#define repl_len(nr) (.L\@_repl_e\()nr - .L\@_repl_s\()nr) - -#define as_max(a, b) ((a) ^ (((a) ^ (b)) & -as_true((a) < (b)))) - -.macro ALTERNATIVE oldinstr, newinstr, feature - decl_orig(\oldinstr, repl_len(1) - orig_len) - - .pushsection .altinstructions, "a", @progbits - altinstruction_entry .L\@_orig_s, .L\@_repl_s1, \feature, \ - orig_len, repl_len(1), pad_len - - .section .discard, "a", @progbits - /* - * Assembler-time checks: - * - total_len <= 255 - * - \newinstr <= total_len - */ - .byte total_len - .byte 0xff + repl_len(1) - total_len - - .section .altinstr_replacement, "ax", @progbits - - decl_repl(\newinstr, 1) - - .popsection -.endm - -.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 - decl_orig(\oldinstr, as_max(repl_len(1), repl_len(2)) - orig_len) - - .pushsection .altinstructions, "a", @progbits - - altinstruction_entry .L\@_orig_s, .L\@_repl_s1, \feature1, \ - orig_len, repl_len(1), pad_len - altinstruction_entry .L\@_orig_s, .L\@_repl_s2, \feature2, \ - orig_len, repl_len(2), pad_len - - .section .discard, "a", @progbits - /* - * Assembler-time checks: - * - total_len <= 255 - * - \newinstr* <= total_len - */ - .byte total_len - .byte 0xff + repl_len(1) - total_len - .byte 0xff + repl_len(2) - total_len - - .section .altinstr_replacement, "ax", @progbits - - decl_repl(\newinstr1, 1) - decl_repl(\newinstr2, 2) - - .popsection -.endm - -#undef as_max -#undef repl_len -#undef decl_repl -#undef total_len -#undef pad_len -#undef orig_len -#undef decl_orig -#undef as_true - -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_X86_ALTERNATIVE_ASM_H_ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git 
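Two tricks in the assembly macros above deserve a note. The as_max expression is the classic branchless maximum, relying on GAS evaluating a true comparison to -1 (hence the as_true shim for Clang's integrated assembler, where true is 1). In C, with the all-ones mask made explicit:

    static inline unsigned int branchless_max(unsigned int a, unsigned int b)
    {
        /* -(a < b) is all-ones when a < b and zero otherwise, so the
         * XOR-select yields b in the first case and a in the second. */
        return a ^ ((a ^ b) & -(unsigned int)(a < b));
    }

And the ".byte 0xff + repl_len(1) - total_len" emission into .discard is a free assembler-time assertion: .byte only accepts 0..255, so any replacement longer than the padded original pushes the expression past 255 and fails the build.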
a/xen/include/asm-x86/alternative.h b/xen/include/asm-x86/alternative.h deleted file mode 100644 index a7a82c2c03..0000000000 --- a/xen/include/asm-x86/alternative.h +++ /dev/null @@ -1,387 +0,0 @@ -#ifndef __X86_ALTERNATIVE_H__ -#define __X86_ALTERNATIVE_H__ - -#ifdef __ASSEMBLY__ -#include -#else -#include -#include -#include - -struct __packed alt_instr { - int32_t orig_offset; /* original instruction */ - int32_t repl_offset; /* offset to replacement instruction */ - uint16_t cpuid; /* cpuid bit set for replacement */ - uint8_t orig_len; /* length of original instruction */ - uint8_t repl_len; /* length of new instruction */ - uint8_t pad_len; /* length of build-time padding */ - uint8_t priv; /* Private, for use by apply_alternatives() */ -}; - -#define __ALT_PTR(a,f) ((uint8_t *)((void *)&(a)->f + (a)->f)) -#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) -#define ALT_REPL_PTR(a) __ALT_PTR(a, repl_offset) - -extern void add_nops(void *insns, unsigned int len); -/* Similar to alternative_instructions except it can be run with IRQs enabled. */ -extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); -extern void alternative_instructions(void); -extern void alternative_branches(void); - -#define alt_orig_len "(.LXEN%=_orig_e - .LXEN%=_orig_s)" -#define alt_pad_len "(.LXEN%=_orig_p - .LXEN%=_orig_e)" -#define alt_total_len "(.LXEN%=_orig_p - .LXEN%=_orig_s)" -#define alt_repl_s(num) ".LXEN%=_repl_s"#num -#define alt_repl_e(num) ".LXEN%=_repl_e"#num -#define alt_repl_len(num) "(" alt_repl_e(num) " - " alt_repl_s(num) ")" - -/* GAS's idea of true is -1, while Clang's idea is 1. */ -#ifdef HAVE_AS_NEGATIVE_TRUE -# define AS_TRUE "-" -#else -# define AS_TRUE "" -#endif - -#define as_max(a, b) "(("a") ^ ((("a") ^ ("b")) & -("AS_TRUE"(("a") < ("b")))))" - -#define OLDINSTR(oldinstr, padding) \ - ".LXEN%=_orig_s:\n\t" oldinstr "\n .LXEN%=_orig_e:\n\t" \ - ".LXEN%=_diff = " padding "\n\t" \ - "mknops ("AS_TRUE"(.LXEN%=_diff > 0) * .LXEN%=_diff)\n\t" \ - ".LXEN%=_orig_p:\n\t" - -#define OLDINSTR_1(oldinstr, n1) \ - OLDINSTR(oldinstr, alt_repl_len(n1) "-" alt_orig_len) - -#define OLDINSTR_2(oldinstr, n1, n2) \ - OLDINSTR(oldinstr, \ - as_max(alt_repl_len(n1), \ - alt_repl_len(n2)) "-" alt_orig_len) - -#define ALTINSTR_ENTRY(feature, num) \ - " .long .LXEN%=_orig_s - .\n" /* label */ \ - " .long " alt_repl_s(num)" - .\n" /* new instruction */ \ - " .word " __stringify(feature) "\n" /* feature bit */ \ - " .byte " alt_orig_len "\n" /* source len */ \ - " .byte " alt_repl_len(num) "\n" /* replacement len */ \ - " .byte " alt_pad_len "\n" /* padding len */ \ - " .byte 0\n" /* priv */ - -#define DISCARD_ENTRY(num) /* repl <= total */ \ - " .byte 0xff + (" alt_repl_len(num) ") - (" alt_total_len ")\n" - -#define ALTINSTR_REPLACEMENT(newinstr, num) /* replacement */ \ - alt_repl_s(num)":\n\t" newinstr "\n" alt_repl_e(num) ":\n\t" - -/* alternative assembly primitive: */ -#define ALTERNATIVE(oldinstr, newinstr, feature) \ - OLDINSTR_1(oldinstr, 1) \ - ".pushsection .altinstructions, \"a\", @progbits\n" \ - ALTINSTR_ENTRY(feature, 1) \ - ".section .discard, \"a\", @progbits\n" \ - ".byte " alt_total_len "\n" /* total_len <= 255 */ \ - DISCARD_ENTRY(1) \ - ".section .altinstr_replacement, \"ax\", @progbits\n" \ - ALTINSTR_REPLACEMENT(newinstr, 1) \ - ".popsection\n" - -#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ - OLDINSTR_2(oldinstr, 1, 2) \ - ".pushsection .altinstructions, \"a\", @progbits\n" \ - ALTINSTR_ENTRY(feature1, 1) \ - 
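struct alt_instr stores self-relative offsets: each int32_t holds the distance from its own address to the target, so the table needs no load-time relocations. A sketch of the decode that __ALT_PTR performs above:

    static void decode_entry(const struct alt_instr *a)
    {
        /* Address of the field, plus the delta stored in it. */
        const uint8_t *orig =
            (const uint8_t *)&a->orig_offset + a->orig_offset;
        const uint8_t *repl =
            (const uint8_t *)&a->repl_offset + a->repl_offset;

        /* orig/repl now point at the original and replacement code. */
    }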
ALTINSTR_ENTRY(feature2, 2) \ - ".section .discard, \"a\", @progbits\n" \ - ".byte " alt_total_len "\n" /* total_len <= 255 */ \ - DISCARD_ENTRY(1) \ - DISCARD_ENTRY(2) \ - ".section .altinstr_replacement, \"ax\", @progbits\n" \ - ALTINSTR_REPLACEMENT(newinstr1, 1) \ - ALTINSTR_REPLACEMENT(newinstr2, 2) \ - ".popsection\n" - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. - */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") - -#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ - asm volatile (ALTERNATIVE_2(oldinstr, newinstr1, feature1, \ - newinstr2, feature2) \ - : : : "memory") - -/* - * Alternative inline assembly with input. - * - * Pecularities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ - : : input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) \ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ - : output : input) - -/* - * This is similar to alternative_io. But it has two features and - * respective instructions. - * - * If CPU has feature2, newinstr2 is used. - * Otherwise, if CPU has feature1, newinstr1 is used. - * Otherwise, oldinstr is used. - */ -#define alternative_io_2(oldinstr, newinstr1, feature1, newinstr2, \ - feature2, output, input...) \ - asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \ - newinstr2, feature2) \ - : output : input) - -/* Use this macro(s) if you need more than one output parameter. */ -#define ASM_OUTPUT2(a...) a - -/* - * Machinery to allow converting indirect to direct calls, when the called - * function is determined once at boot and later never changed. - */ - -#define ALT_CALL_arg1 "rdi" -#define ALT_CALL_arg2 "rsi" -#define ALT_CALL_arg3 "rdx" -#define ALT_CALL_arg4 "rcx" -#define ALT_CALL_arg5 "r8" -#define ALT_CALL_arg6 "r9" - -#define ALT_CALL_ARG(arg, n) \ - register typeof(arg) a ## n ## _ asm ( ALT_CALL_arg ## n ) = \ - ({ BUILD_BUG_ON(sizeof(arg) > sizeof(void *)); (arg); }) -#define ALT_CALL_NO_ARG(n) \ - register unsigned long a ## n ## _ asm ( ALT_CALL_arg ## n ) - -#define ALT_CALL_NO_ARG6 ALT_CALL_NO_ARG(6) -#define ALT_CALL_NO_ARG5 ALT_CALL_NO_ARG(5); ALT_CALL_NO_ARG6 -#define ALT_CALL_NO_ARG4 ALT_CALL_NO_ARG(4); ALT_CALL_NO_ARG5 -#define ALT_CALL_NO_ARG3 ALT_CALL_NO_ARG(3); ALT_CALL_NO_ARG4 -#define ALT_CALL_NO_ARG2 ALT_CALL_NO_ARG(2); ALT_CALL_NO_ARG3 -#define ALT_CALL_NO_ARG1 ALT_CALL_NO_ARG(1); ALT_CALL_NO_ARG2 - -/* - * Unfortunately ALT_CALL_NO_ARG() above can't use a fake initializer (to - * suppress "uninitialized variable" warnings), as various versions of gcc - * older than 8.1 fall on the nose in various ways with that (always because - * of some other construct elsewhere in the same function needing to use the - * same hard register). 
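Using the alternative() wrapper defined above is a one-liner at the call site. The instruction/feature pair here is purely illustrative; note the macro pads the original site with NOPs when the replacement is longer:

    /* Illustrative only: patch in "mfence" over "rep; nop" wherever the
     * (always-set) synthetic feature bit is enabled. */
    alternative("rep; nop", "mfence", X86_FEATURE_ALWAYS);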
Otherwise the asm() below could uniformly use "+r" - * output constraints, making unnecessary all these ALT_CALL_OUT macros. - */ -#define ALT_CALL0_OUT "=r" (a1_), "=r" (a2_), "=r" (a3_), \ - "=r" (a4_), "=r" (a5_), "=r" (a6_) -#define ALT_CALL1_OUT "+r" (a1_), "=r" (a2_), "=r" (a3_), \ - "=r" (a4_), "=r" (a5_), "=r" (a6_) -#define ALT_CALL2_OUT "+r" (a1_), "+r" (a2_), "=r" (a3_), \ - "=r" (a4_), "=r" (a5_), "=r" (a6_) -#define ALT_CALL3_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ - "=r" (a4_), "=r" (a5_), "=r" (a6_) -#define ALT_CALL4_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ - "+r" (a4_), "=r" (a5_), "=r" (a6_) -#define ALT_CALL5_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ - "+r" (a4_), "+r" (a5_), "=r" (a6_) -#define ALT_CALL6_OUT "+r" (a1_), "+r" (a2_), "+r" (a3_), \ - "+r" (a4_), "+r" (a5_), "+r" (a6_) - -#define alternative_callN(n, rettype, func) ({ \ - rettype ret_; \ - register unsigned long r10_ asm("r10"); \ - register unsigned long r11_ asm("r11"); \ - asm volatile (ALTERNATIVE("call *%c[addr](%%rip)", "call .", \ - X86_FEATURE_ALWAYS) \ - : ALT_CALL ## n ## _OUT, "=a" (ret_), \ - "=r" (r10_), "=r" (r11_) ASM_CALL_CONSTRAINT \ - : [addr] "i" (&(func)), "g" (func) \ - : "memory" ); \ - ret_; \ -}) - -#define alternative_vcall0(func) ({ \ - ALT_CALL_NO_ARG1; \ - (void)sizeof(func()); \ - (void)alternative_callN(0, int, func); \ -}) - -#define alternative_call0(func) ({ \ - ALT_CALL_NO_ARG1; \ - alternative_callN(0, typeof(func()), func); \ -}) - -#define alternative_vcall1(func, arg) ({ \ - ALT_CALL_ARG(arg, 1); \ - ALT_CALL_NO_ARG2; \ - (void)sizeof(func(arg)); \ - (void)alternative_callN(1, int, func); \ -}) - -#define alternative_call1(func, arg) ({ \ - ALT_CALL_ARG(arg, 1); \ - ALT_CALL_NO_ARG2; \ - alternative_callN(1, typeof(func(arg)), func); \ -}) - -#define alternative_vcall2(func, arg1, arg2) ({ \ - typeof(arg2) v2_ = (arg2); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_NO_ARG3; \ - (void)sizeof(func(arg1, arg2)); \ - (void)alternative_callN(2, int, func); \ -}) - -#define alternative_call2(func, arg1, arg2) ({ \ - typeof(arg2) v2_ = (arg2); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_NO_ARG3; \ - alternative_callN(2, typeof(func(arg1, arg2)), func); \ -}) - -#define alternative_vcall3(func, arg1, arg2, arg3) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_NO_ARG4; \ - (void)sizeof(func(arg1, arg2, arg3)); \ - (void)alternative_callN(3, int, func); \ -}) - -#define alternative_call3(func, arg1, arg2, arg3) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_NO_ARG4; \ - alternative_callN(3, typeof(func(arg1, arg2, arg3)), \ - func); \ -}) - -#define alternative_vcall4(func, arg1, arg2, arg3, arg4) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_NO_ARG5; \ - (void)sizeof(func(arg1, arg2, arg3, arg4)); \ - (void)alternative_callN(4, int, func); \ -}) - -#define alternative_call4(func, arg1, arg2, arg3, arg4) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_NO_ARG5; \ - alternative_callN(4, 
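The ALT_CALL machinery pins arguments into the SysV ABI argument registers so that alternative_callN() can patch the boot-time-constant indirect call into a direct one. A hedged usage sketch with an invented hook table:

    /* Invented for illustration: an ops table fixed once at boot. */
    struct example_ops { int (*probe)(unsigned int id); };
    extern struct example_ops example_ops;

    int use_hook(unsigned int id)
    {
        /* Emits "call *example_ops.probe(%rip)"; after patching, a
         * direct call to whatever function was installed at boot. */
        return alternative_call(example_ops.probe, id);
    }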
typeof(func(arg1, arg2, \ - arg3, arg4)), \ - func); \ -}) - -#define alternative_vcall5(func, arg1, arg2, arg3, arg4, arg5) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - typeof(arg5) v5_ = (arg5); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_ARG(v5_, 5); \ - ALT_CALL_NO_ARG6; \ - (void)sizeof(func(arg1, arg2, arg3, arg4, arg5)); \ - (void)alternative_callN(5, int, func); \ -}) - -#define alternative_call5(func, arg1, arg2, arg3, arg4, arg5) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - typeof(arg5) v5_ = (arg5); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_ARG(v5_, 5); \ - ALT_CALL_NO_ARG6; \ - alternative_callN(5, typeof(func(arg1, arg2, arg3, \ - arg4, arg5)), \ - func); \ -}) - -#define alternative_vcall6(func, arg1, arg2, arg3, arg4, arg5, arg6) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - typeof(arg5) v5_ = (arg5); \ - typeof(arg6) v6_ = (arg6); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_ARG(v5_, 5); \ - ALT_CALL_ARG(v6_, 6); \ - (void)sizeof(func(arg1, arg2, arg3, arg4, arg5, arg6)); \ - (void)alternative_callN(6, int, func); \ -}) - -#define alternative_call6(func, arg1, arg2, arg3, arg4, arg5, arg6) ({ \ - typeof(arg2) v2_ = (arg2); \ - typeof(arg3) v3_ = (arg3); \ - typeof(arg4) v4_ = (arg4); \ - typeof(arg5) v5_ = (arg5); \ - typeof(arg6) v6_ = (arg6); \ - ALT_CALL_ARG(arg1, 1); \ - ALT_CALL_ARG(v2_, 2); \ - ALT_CALL_ARG(v3_, 3); \ - ALT_CALL_ARG(v4_, 4); \ - ALT_CALL_ARG(v5_, 5); \ - ALT_CALL_ARG(v6_, 6); \ - alternative_callN(6, typeof(func(arg1, arg2, arg3, \ - arg4, arg5, arg6)), \ - func); \ -}) - -#define alternative_vcall__(nr) alternative_vcall ## nr -#define alternative_call__(nr) alternative_call ## nr - -#define alternative_vcall_(nr) alternative_vcall__(nr) -#define alternative_call_(nr) alternative_call__(nr) - -#define alternative_vcall(func, args...) \ - alternative_vcall_(count_args(args))(func, ## args) - -#define alternative_call(func, args...) \ - alternative_call_(count_args(args))(func, ## args) - -#endif /* !__ASSEMBLY__ */ - -#endif /* __X86_ALTERNATIVE_H__ */ diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h deleted file mode 100644 index b206e95863..0000000000 --- a/xen/include/asm-x86/altp2m.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Alternate p2m HVM - * Copyright (c) 2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
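The two-level _/__ expansion at the end exists so count_args() is fully expanded before token pasting. The net effect:

    /* alternative_vcall(fn, a, b)
     *   -> alternative_vcall_(count_args(a, b))(fn, a, b)
     *   -> alternative_vcall_(2)(fn, a, b)
     *   -> alternative_vcall__(2)(fn, a, b)
     *   -> alternative_vcall2(fn, a, b)
     */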
- */ - -#ifndef __ASM_X86_ALTP2M_H -#define __ASM_X86_ALTP2M_H - -#ifdef CONFIG_HVM - -#include -#include /* for struct vcpu, struct domain */ -#include /* for vcpu_altp2m */ - -/* Alternate p2m HVM on/off per domain */ -static inline bool altp2m_active(const struct domain *d) -{ - return d->arch.altp2m_active; -} - -/* Alternate p2m VCPU */ -void altp2m_vcpu_initialise(struct vcpu *v); -void altp2m_vcpu_destroy(struct vcpu *v); - -int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn); -void altp2m_vcpu_disable_ve(struct vcpu *v); - -static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v) -{ - return vcpu_altp2m(v).p2midx; -} -#else - -static inline bool altp2m_active(const struct domain *d) -{ - return false; -} - -/* Only declaration is needed. DCE will optimise it out when linking. */ -uint16_t altp2m_vcpu_idx(const struct vcpu *v); -void altp2m_vcpu_disable_ve(struct vcpu *v); - -#endif - -#endif /* __ASM_X86_ALTP2M_H */ diff --git a/xen/include/asm-x86/amd.h b/xen/include/asm-x86/amd.h deleted file mode 100644 index a82382e6bf..0000000000 --- a/xen/include/asm-x86/amd.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * amd.h - AMD processor specific definitions - */ - -#ifndef __AMD_H__ -#define __AMD_H__ - -#include - -/* CPUID masked for use by AMD-V Extended Migration */ - -/* Family 0Fh, Revision C */ -#define AMD_FEATURES_K8_REV_C_ECX 0 -#define AMD_FEATURES_K8_REV_C_EDX ( \ - cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \ - cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \ - cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \ - cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \ - cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \ - cpufeat_mask(X86_FEATURE_SEP) | cpufeat_mask(X86_FEATURE_MTRR) | \ - cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \ - cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \ - cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_CLFLUSH)| \ - cpufeat_mask(X86_FEATURE_MMX) | cpufeat_mask(X86_FEATURE_FXSR) | \ - cpufeat_mask(X86_FEATURE_SSE) | cpufeat_mask(X86_FEATURE_SSE2)) -#define AMD_EXTFEATURES_K8_REV_C_ECX 0 -#define AMD_EXTFEATURES_K8_REV_C_EDX ( \ - cpufeat_mask(X86_FEATURE_FPU) | cpufeat_mask(X86_FEATURE_VME) | \ - cpufeat_mask(X86_FEATURE_DE) | cpufeat_mask(X86_FEATURE_PSE) | \ - cpufeat_mask(X86_FEATURE_TSC) | cpufeat_mask(X86_FEATURE_MSR) | \ - cpufeat_mask(X86_FEATURE_PAE) | cpufeat_mask(X86_FEATURE_MCE) | \ - cpufeat_mask(X86_FEATURE_CX8) | cpufeat_mask(X86_FEATURE_APIC) | \ - cpufeat_mask(X86_FEATURE_SYSCALL) | cpufeat_mask(X86_FEATURE_MTRR) | \ - cpufeat_mask(X86_FEATURE_PGE) | cpufeat_mask(X86_FEATURE_MCA) | \ - cpufeat_mask(X86_FEATURE_CMOV) | cpufeat_mask(X86_FEATURE_PAT) | \ - cpufeat_mask(X86_FEATURE_PSE36) | cpufeat_mask(X86_FEATURE_NX) | \ - cpufeat_mask(X86_FEATURE_MMXEXT) | cpufeat_mask(X86_FEATURE_MMX) | \ - cpufeat_mask(X86_FEATURE_FXSR) | cpufeat_mask(X86_FEATURE_LM) | \ - cpufeat_mask(X86_FEATURE_3DNOWEXT) | cpufeat_mask(X86_FEATURE_3DNOW)) - -/* Family 0Fh, Revision D */ -#define AMD_FEATURES_K8_REV_D_ECX AMD_FEATURES_K8_REV_C_ECX -#define AMD_FEATURES_K8_REV_D_EDX AMD_FEATURES_K8_REV_C_EDX -#define AMD_EXTFEATURES_K8_REV_D_ECX (AMD_EXTFEATURES_K8_REV_C_ECX |\ - cpufeat_mask(X86_FEATURE_LAHF_LM)) -#define AMD_EXTFEATURES_K8_REV_D_EDX (AMD_EXTFEATURES_K8_REV_C_EDX |\ - cpufeat_mask(X86_FEATURE_FFXSR)) - -/* Family 0Fh, Revision E */ -#define AMD_FEATURES_K8_REV_E_ECX (AMD_FEATURES_K8_REV_D_ECX | \ - 
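The !CONFIG_HVM half of the altp2m header above deliberately leaves altp2m_vcpu_idx() and altp2m_vcpu_disable_ve() as bare declarations: every caller is expected to be guarded by altp2m_active(), which is compile-time false there, so the calls are discarded before the linker ever looks for the symbols. Sketch of a conforming caller:

    if ( altp2m_active(v->domain) )   /* constant false without HVM */
        altp2m_vcpu_disable_ve(v);    /* eliminated as dead code, so the
                                       * undefined symbol is never
                                       * referenced */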
cpufeat_mask(X86_FEATURE_SSE3)) -#define AMD_FEATURES_K8_REV_E_EDX (AMD_FEATURES_K8_REV_D_EDX | \ - cpufeat_mask(X86_FEATURE_HTT)) -#define AMD_EXTFEATURES_K8_REV_E_ECX (AMD_EXTFEATURES_K8_REV_D_ECX |\ - cpufeat_mask(X86_FEATURE_CMP_LEGACY)) -#define AMD_EXTFEATURES_K8_REV_E_EDX AMD_EXTFEATURES_K8_REV_D_EDX - -/* Family 0Fh, Revision F */ -#define AMD_FEATURES_K8_REV_F_ECX (AMD_FEATURES_K8_REV_E_ECX | \ - cpufeat_mask(X86_FEATURE_CX16)) -#define AMD_FEATURES_K8_REV_F_EDX AMD_FEATURES_K8_REV_E_EDX -#define AMD_EXTFEATURES_K8_REV_F_ECX (AMD_EXTFEATURES_K8_REV_E_ECX |\ - cpufeat_mask(X86_FEATURE_SVM) | cpufeat_mask(X86_FEATURE_EXTAPIC) | \ - cpufeat_mask(X86_FEATURE_CR8_LEGACY)) -#define AMD_EXTFEATURES_K8_REV_F_EDX (AMD_EXTFEATURES_K8_REV_E_EDX |\ - cpufeat_mask(X86_FEATURE_RDTSCP)) - -/* Family 0Fh, Revision G */ -#define AMD_FEATURES_K8_REV_G_ECX AMD_FEATURES_K8_REV_F_ECX -#define AMD_FEATURES_K8_REV_G_EDX AMD_FEATURES_K8_REV_F_EDX -#define AMD_EXTFEATURES_K8_REV_G_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\ - cpufeat_mask(X86_FEATURE_3DNOWPREFETCH)) -#define AMD_EXTFEATURES_K8_REV_G_EDX AMD_EXTFEATURES_K8_REV_F_EDX - -/* Family 10h, Revision B */ -#define AMD_FEATURES_FAM10h_REV_B_ECX (AMD_FEATURES_K8_REV_F_ECX | \ - cpufeat_mask(X86_FEATURE_POPCNT) | cpufeat_mask(X86_FEATURE_MONITOR)) -#define AMD_FEATURES_FAM10h_REV_B_EDX AMD_FEATURES_K8_REV_F_EDX -#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\ - cpufeat_mask(X86_FEATURE_ABM) | cpufeat_mask(X86_FEATURE_SSE4A) | \ - cpufeat_mask(X86_FEATURE_MISALIGNSSE) | cpufeat_mask(X86_FEATURE_OSVW) |\ - cpufeat_mask(X86_FEATURE_IBS)) -#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\ - cpufeat_mask(X86_FEATURE_PAGE1GB)) - -/* Family 10h, Revision C */ -#define AMD_FEATURES_FAM10h_REV_C_ECX AMD_FEATURES_FAM10h_REV_B_ECX -#define AMD_FEATURES_FAM10h_REV_C_EDX AMD_FEATURES_FAM10h_REV_B_EDX -#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\ - cpufeat_mask(X86_FEATURE_SKINIT) | cpufeat_mask(X86_FEATURE_WDT)) -#define AMD_EXTFEATURES_FAM10h_REV_C_EDX AMD_EXTFEATURES_FAM10h_REV_B_EDX - -/* Family 11h, Revision B */ -#define AMD_FEATURES_FAM11h_REV_B_ECX AMD_FEATURES_K8_REV_G_ECX -#define AMD_FEATURES_FAM11h_REV_B_EDX AMD_FEATURES_K8_REV_G_EDX -#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\ - cpufeat_mask(X86_FEATURE_SKINIT)) -#define AMD_EXTFEATURES_FAM11h_REV_B_EDX AMD_EXTFEATURES_K8_REV_G_EDX - -/* AMD errata checking - * - * Errata are defined using the AMD_LEGACY_ERRATUM() or AMD_OSVW_ERRATUM() - * macros. The latter is intended for newer errata that have an OSVW id - * assigned, which it takes as first argument. Both take a variable number - * of family-specific model-stepping ranges created by AMD_MODEL_RANGE(). - * - * Example 1: - * #define AMD_ERRATUM_319 \ - * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), \ - * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), \ - * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)) - * Example 2: - * #define AMD_ERRATUM_400 \ - * AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), \ - * AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)) - * - */ - -#define AMD_LEGACY_ERRATUM(...) -1 /* legacy */, __VA_ARGS__, 0 -#define AMD_OSVW_ERRATUM(osvw_id, ...) 
osvw_id, __VA_ARGS__, 0 -#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) - -#define AMD_ERRATUM_121 \ - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x3f, 0xf)) - -#define AMD_ERRATUM_170 \ - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x67, 0xf)) - -#define AMD_ERRATUM_383 \ - AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf), \ - AMD_MODEL_RANGE(0x12, 0x0, 0x0, 0x1, 0x0)) - -#define AMD_ERRATUM_573 \ - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0xff, 0xf), \ - AMD_MODEL_RANGE(0x10, 0x0, 0x0, 0xff, 0xf), \ - AMD_MODEL_RANGE(0x11, 0x0, 0x0, 0xff, 0xf), \ - AMD_MODEL_RANGE(0x12, 0x0, 0x0, 0xff, 0xf)) - -struct cpuinfo_x86; -int cpu_has_amd_erratum(const struct cpuinfo_x86 *, int, ...); - -extern s8 opt_allow_unsafe; - -void fam10h_check_enable_mmcfg(void); -void check_enable_amd_mmconf_dmi(void); - -extern bool amd_acpi_c1e_quirk; -void amd_check_disable_c1e(unsigned int port, u8 value); - -#endif /* __AMD_H__ */ diff --git a/xen/include/asm-x86/apic.h b/xen/include/asm-x86/apic.h deleted file mode 100644 index 2fe54bbf1c..0000000000 --- a/xen/include/asm-x86/apic.h +++ /dev/null @@ -1,202 +0,0 @@ -#ifndef __ASM_APIC_H -#define __ASM_APIC_H - -#include -#include -#include - -#define Dprintk(x...) do {} while (0) - -/* - * Debugging macros - */ -#define APIC_QUIET 0 -#define APIC_VERBOSE 1 -#define APIC_DEBUG 2 - -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) - -/* Possible APIC states */ -enum apic_mode { - APIC_MODE_INVALID, /* Not set yet */ - APIC_MODE_DISABLED, /* If uniprocessor, or MP in uniprocessor mode */ - APIC_MODE_XAPIC, /* xAPIC mode - default upon chipset reset */ - APIC_MODE_X2APIC /* x2APIC mode - common for large MP machines */ -}; - -extern bool iommu_x2apic_enabled; -extern u8 apic_verbosity; -extern bool directed_eoi_enabled; - -void check_x2apic_preenabled(void); -void x2apic_bsp_setup(void); -void x2apic_ap_setup(void); -const struct genapic *apic_x2apic_probe(void); - -/* - * Define the default level of output to be very little - * This can be turned up by using apic=verbose for more - * information and apic=debug for _lots_ of information. - * apic_verbosity is defined in apic.c - */ -#define apic_printk(v, s, a...) do { \ - if ((v) <= apic_verbosity) \ - printk(s, ##a); \ - } while (0) - - -/* - * Basic functions accessing APICs. - */ - -static __inline void apic_mem_write(unsigned long reg, u32 v) -{ - *((volatile u32 *)(APIC_BASE+reg)) = v; -} - -static __inline void apic_mem_write_atomic(unsigned long reg, u32 v) -{ - (void)xchg((volatile u32 *)(APIC_BASE+reg), v); -} - -static __inline u32 apic_mem_read(unsigned long reg) -{ - return *((volatile u32 *)(APIC_BASE+reg)); -} - -/* NOTE: in x2APIC mode, we should use apic_icr_write()/apic_icr_read() to - * access the 64-bit ICR register. 
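The packed layout of AMD_MODEL_RANGE() is easiest to see with numbers. Taking the first range of AMD_ERRATUM_383 above:

    /* AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)
     *   == (0x10 << 24) | (0x2 << 16) | (0x1 << 12) | (0xff << 4) | 0xf
     *   == 0x10021fff
     *
     * AMD_MODEL_RANGE_FAMILY(0x10021fff) == 0x10
     * AMD_MODEL_RANGE_START(0x10021fff)  == 0x021  (model 0x02, stepping 1)
     * AMD_MODEL_RANGE_END(0x10021fff)    == 0xfff  (model 0xff, stepping 0xf)
     */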
- */ - -static __inline void apic_wrmsr(unsigned long reg, uint64_t msr_content) -{ - if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || - reg == APIC_LVR) - return; - - wrmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); -} - -static __inline uint64_t apic_rdmsr(unsigned long reg) -{ - uint64_t msr_content; - - if (reg == APIC_DFR) - return -1u; - - rdmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); - return msr_content; -} - -static __inline void apic_write(unsigned long reg, u32 v) -{ - - if ( x2apic_enabled ) - apic_wrmsr(reg, v); - else - apic_mem_write(reg, v); -} - -static __inline void apic_write_atomic(unsigned long reg, u32 v) -{ - if ( x2apic_enabled ) - apic_wrmsr(reg, v); - else - apic_mem_write_atomic(reg, v); -} - -static __inline u32 apic_read(unsigned long reg) -{ - if ( x2apic_enabled ) - return apic_rdmsr(reg); - else - return apic_mem_read(reg); -} - -static __inline u64 apic_icr_read(void) -{ - u32 lo, hi; - - if ( x2apic_enabled ) - return apic_rdmsr(APIC_ICR); - else - { - lo = apic_mem_read(APIC_ICR); - hi = apic_mem_read(APIC_ICR2); - } - - return ((u64)lo) | (((u64)hi) << 32); -} - -static __inline void apic_icr_write(u32 low, u32 dest) -{ - if ( x2apic_enabled ) - apic_wrmsr(APIC_ICR, low | ((uint64_t)dest << 32)); - else - { - apic_mem_write(APIC_ICR2, dest << 24); - apic_mem_write(APIC_ICR, low); - } -} - -static __inline bool_t apic_isr_read(u8 vector) -{ - return (apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)) >> - (vector & 0x1f)) & 1; -} - -static __inline u32 get_apic_id(void) /* Get the physical APIC id */ -{ - u32 id = apic_read(APIC_ID); - return x2apic_enabled ? id : GET_xAPIC_ID(id); -} - -void apic_wait_icr_idle(void); - -int get_physical_broadcast(void); - -static inline void ack_APIC_irq(void) -{ - /* Docs say use 0 for future compatibility */ - apic_write(APIC_EOI, 0); -} - -extern int get_maxlvt(void); -extern void clear_local_APIC(void); -extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (int virt_wire_setup); -extern void disable_local_APIC (void); -extern int verify_local_APIC (void); -extern void cache_APIC_registers (void); -extern void sync_Arb_IDs (void); -extern void init_bsp_APIC (void); -extern void setup_local_APIC(bool bsp); -extern void init_apic_mappings (void); -extern void smp_local_timer_interrupt (struct cpu_user_regs *regs); -extern void setup_boot_APIC_clock (void); -extern void setup_secondary_APIC_clock (void); -extern void setup_apic_nmi_watchdog (void); -extern void disable_lapic_nmi_watchdog(void); -extern int reserve_lapic_nmi(void); -extern void release_lapic_nmi(void); -extern void self_nmi(void); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern bool nmi_watchdog_tick(const struct cpu_user_regs *regs); -extern int APIC_init_uniprocessor (void); -extern void disable_APIC_timer(void); -extern void enable_APIC_timer(void); -extern int lapic_suspend(void); -extern int lapic_resume(void); -extern void record_boot_APIC_mode(void); -extern enum apic_mode current_local_apic_mode(void); -extern void check_for_unexpected_msi(unsigned int vector); - -extern void check_nmi_watchdog(void); - -extern unsigned int nmi_watchdog; -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 - -#endif /* __ASM_APIC_H */ diff --git a/xen/include/asm-x86/apicdef.h b/xen/include/asm-x86/apicdef.h deleted file mode 100644 index 0633da9fe1..0000000000 --- a/xen/include/asm-x86/apicdef.h +++ /dev/null @@ -1,134 +0,0 @@ -#ifndef __ASM_APICDEF_H -#define 
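The index arithmetic in apic_isr_read() above selects one of the eight 32-bit ISR words, which sit 16 bytes apart starting at APIC_ISR. For vector 0x41:

    /* (0x41 & ~0x1f) >> 1 == 0x40 >> 1 == 0x20, so the word read is
     * APIC_ISR + 0x20 (the third ISR register), and the bit tested is
     * 0x41 & 0x1f == 1. In general: register = vector / 32 and
     * bit = vector % 32; halving (vector & ~0x1f) converts the 32-bit
     * group number into the 16-byte register stride. */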
__ASM_APICDEF_H - -/* - * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) - * - * Alan Cox , 1995. - * Ingo Molnar , 1999, 2000 - */ - -#define APIC_DEFAULT_PHYS_BASE 0xfee00000 - -#define APIC_ID 0x20 -#define APIC_ID_MASK (0xFFu<<24) -#define GET_xAPIC_ID(x) (((x)>>24)&0xFFu) -#define SET_xAPIC_ID(x) (((x)<<24)) -#define APIC_LVR 0x30 -#define APIC_LVR_MASK 0xFF00FF -#define APIC_LVR_DIRECTED_EOI (1 << 24) -#define GET_APIC_VERSION(x) ((x)&0xFF) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) -#define APIC_XAPIC(x) ((x) >= 0x14) -#define APIC_TASKPRI 0x80 -#define APIC_TPRI_MASK 0xFF -#define APIC_ARBPRI 0x90 -#define APIC_ARBPRI_MASK 0xFF -#define APIC_PROCPRI 0xA0 -#define APIC_EOI 0xB0 -#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ -#define APIC_RRR 0xC0 -#define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFFu<<24) -#define GET_xAPIC_LOGICAL_ID(x) (((x)>>24)&0xFF) -#define SET_xAPIC_LOGICAL_ID(x) (((x)<<24)) -#define APIC_ALL_CPUS 0xFF -#define APIC_DFR 0xE0 -#define APIC_DFR_CLUSTER 0x0FFFFFFFul -#define APIC_DFR_FLAT 0xFFFFFFFFul -#define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) -#define APIC_SPIV_DIRECTED_EOI (1<<12) -#define APIC_ISR 0x100 -#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ -#define APIC_TMR 0x180 -#define APIC_IRR 0x200 -#define APIC_ESR 0x280 -#define APIC_ESR_SEND_CS 0x00001 -#define APIC_ESR_RECV_CS 0x00002 -#define APIC_ESR_SEND_ACC 0x00004 -#define APIC_ESR_RECV_ACC 0x00008 -#define APIC_ESR_SENDILL 0x00020 -#define APIC_ESR_RECVILL 0x00040 -#define APIC_ESR_ILLREGA 0x00080 -#define APIC_ICR 0x300 -#define APIC_DEST_NOSHORT 0x00000 -#define APIC_DEST_SELF 0x40000 -#define APIC_DEST_ALLINC 0x80000 -#define APIC_DEST_ALLBUT 0xC0000 -#define APIC_SHORT_MASK 0xC0000 -#define APIC_ICR_RR_MASK 0x30000 -#define APIC_ICR_RR_INVALID 0x00000 -#define APIC_ICR_RR_INPROG 0x10000 -#define APIC_ICR_RR_VALID 0x20000 -#define APIC_INT_LEVELTRIG 0x08000 -#define APIC_INT_ASSERT 0x04000 -#define APIC_ICR_BUSY 0x01000 -#define APIC_DEST_MASK 0x00800 -#define APIC_DEST_LOGICAL 0x00800 -#define APIC_DEST_PHYSICAL 0x00000 -#define APIC_DM_FIXED 0x00000 -#define APIC_DM_LOWEST 0x00100 -#define APIC_DM_SMI 0x00200 -#define APIC_DM_REMRD 0x00300 -#define APIC_DM_NMI 0x00400 -#define APIC_DM_INIT 0x00500 -#define APIC_DM_STARTUP 0x00600 -#define APIC_DM_EXTINT 0x00700 -#define APIC_VECTOR_MASK 0x000FF -#define APIC_ICR2 0x310 -#define GET_xAPIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_xAPIC_DEST_FIELD(x) ((x)<<24) -#define APIC_LVTT 0x320 -#define APIC_LVTTHMR 0x330 -#define APIC_LVTPC 0x340 -#define APIC_LVT0 0x350 -#define APIC_CMCI 0x2F0 - -#define APIC_TIMER_MODE_MASK (0x3<<17) -#define APIC_TIMER_MODE_ONESHOT (0x0<<17) -#define APIC_TIMER_MODE_PERIODIC (0x1<<17) -#define APIC_TIMER_MODE_TSC_DEADLINE (0x2<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) -#define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) -#define APIC_MODE_FIXED 0x0 -#define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXTINT 0x7 -#define APIC_LVT1 0x360 -#define APIC_LVTERR 0x370 -#define APIC_TMICT 0x380 -#define APIC_TMCCT 0x390 -#define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) -#define APIC_TDR_DIV_1 0xB -#define APIC_TDR_DIV_2 0x0 -#define APIC_TDR_DIV_4 0x1 -#define APIC_TDR_DIV_8 0x2 
-#define APIC_TDR_DIV_16 0x3 -#define APIC_TDR_DIV_32 0x8 -#define APIC_TDR_DIV_64 0x9 -#define APIC_TDR_DIV_128 0xA - -/* Only available in x2APIC mode */ -#define APIC_SELF_IPI 0x3F0 - -/* Applicable to vectors, TPR, and PPR. */ -#define APIC_PRIO_CLASS(v) ((v) & 0xF0) - -#define APIC_BASE __fix_to_virt(FIX_APIC_BASE) - -/* It's only used in x2APIC mode of an x2APIC unit. */ -#define APIC_MSR_BASE 0x800 - -#define MAX_IO_APICS 128 - -extern bool x2apic_enabled; - -#endif diff --git a/xen/include/asm-x86/asm-defns.h b/xen/include/asm-x86/asm-defns.h deleted file mode 100644 index 505f39ad5f..0000000000 --- a/xen/include/asm-x86/asm-defns.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef HAVE_AS_CLAC_STAC -.macro clac - .byte 0x0f, 0x01, 0xca -.endm - -.macro stac - .byte 0x0f, 0x01, 0xcb -.endm -#endif - -.macro vmrun - .byte 0x0f, 0x01, 0xd8 -.endm - -.macro stgi - .byte 0x0f, 0x01, 0xdc -.endm - -.macro clgi - .byte 0x0f, 0x01, 0xdd -.endm - -.macro INDIRECT_BRANCH insn:req arg:req -/* - * Create an indirect branch. insn is one of call/jmp, arg is a single - * register. - * - * With no compiler support, this degrades into a plain indirect call/jmp. - * With compiler support, dispatch to the correct __x86_indirect_thunk_* - */ - .if CONFIG_INDIRECT_THUNK == 1 - - $done = 0 - .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15 - .ifeqs "\arg", "%r\reg" - \insn __x86_indirect_thunk_r\reg - $done = 1 - .exitm - .endif - .endr - - .if $done != 1 - .error "Bad register arg \arg" - .endif - - .else - \insn *\arg - .endif -.endm - -/* Convenience wrappers. */ -.macro INDIRECT_CALL arg:req - INDIRECT_BRANCH call \arg -.endm - -.macro INDIRECT_JMP arg:req - INDIRECT_BRANCH jmp \arg -.endm - -.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req -#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS) - /* - * Here we want - * - * ptr &= ~0ull >> (ptr < HYPERVISOR_VIRT_END); - * - * but guaranteed without any conditional branches (hence in assembly). - */ - mov $(HYPERVISOR_VIRT_END - 1), \scratch1 - mov $~0, \scratch2 - cmp \ptr, \scratch1 - rcr $1, \scratch2 - and \scratch2, \ptr -#elif defined(CONFIG_DEBUG) && defined(CONFIG_PV) - xor $~\@, \scratch1 - xor $~\@, \scratch2 -#endif -.endm diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h deleted file mode 100644 index d9431180cf..0000000000 --- a/xen/include/asm-x86/asm_defns.h +++ /dev/null @@ -1,354 +0,0 @@ - -#ifndef __X86_ASM_DEFNS_H__ -#define __X86_ASM_DEFNS_H__ - -#ifndef COMPILE_OFFSETS -/* NB. Auto-generated from arch/.../asm-offsets.c */ -#include -#endif -#include -#include -#include -#include -#include - -#ifdef __ASSEMBLY__ -#include -#ifndef CONFIG_INDIRECT_THUNK -.equ CONFIG_INDIRECT_THUNK, 0 -#endif -#else -#include -asm ( "\t.equ CONFIG_INDIRECT_THUNK, " - __stringify(IS_ENABLED(CONFIG_INDIRECT_THUNK)) ); -#endif - -#ifndef __ASSEMBLY__ - -/* - * This output constraint should be used for any inline asm which has a "call" - * instruction. Otherwise the asm may be inserted before the frame pointer - * gets set up by the containing function. 
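guest_access_mask_ptr implements, without any conditional branch, exactly the C expression quoted in its comment: the cmp leaves the carry flag clear precisely when the pointer is below HYPERVISOR_VIRT_END, and rcr rotates that carry into the top bit of the all-ones mask. A branchy C equivalent of the cmp/rcr/and sequence:

    unsigned long mask = ~0UL >> (ptr < HYPERVISOR_VIRT_END);

    ptr &= mask;   /* top bit forced clear iff ptr was below the boundary */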
- */ -#ifdef CONFIG_FRAME_POINTER -register unsigned long current_stack_pointer asm("rsp"); -# define ASM_CALL_CONSTRAINT , "+r" (current_stack_pointer) -#else -# define ASM_CALL_CONSTRAINT -#endif - -#endif - -#ifndef NDEBUG -#define ASSERT_INTERRUPT_STATUS(x, msg) \ - pushf; \ - testb $X86_EFLAGS_IF>>8,1(%rsp); \ - j##x 1f; \ - ASSERT_FAILED(msg); \ -1: addq $8,%rsp; -#else -#define ASSERT_INTERRUPT_STATUS(x, msg) -#endif - -#define ASSERT_INTERRUPTS_ENABLED \ - ASSERT_INTERRUPT_STATUS(nz, "INTERRUPTS ENABLED") -#define ASSERT_INTERRUPTS_DISABLED \ - ASSERT_INTERRUPT_STATUS(z, "INTERRUPTS DISABLED") - -#ifdef __ASSEMBLY__ -# define _ASM_EX(p) p-. -#else -# define _ASM_EX(p) #p "-." -#endif - -/* Exception table entry */ -#ifdef __ASSEMBLY__ -# define _ASM__EXTABLE(sfx, from, to) \ - .section .ex_table##sfx, "a" ; \ - .balign 4 ; \ - .long _ASM_EX(from), _ASM_EX(to) ; \ - .previous -#else -# define _ASM__EXTABLE(sfx, from, to) \ - " .section .ex_table" #sfx ",\"a\"\n" \ - " .balign 4\n" \ - " .long " _ASM_EX(from) ", " _ASM_EX(to) "\n" \ - " .previous\n" -#endif - -#define _ASM_EXTABLE(from, to) _ASM__EXTABLE(, from, to) -#define _ASM_PRE_EXTABLE(from, to) _ASM__EXTABLE(.pre, from, to) - -#ifdef __ASSEMBLY__ - -#ifdef HAVE_AS_QUOTED_SYM -#define SUBSECTION_LBL(tag) \ - .ifndef .L.tag; \ - .equ .L.tag, 1; \ - .equ __stringify(__OBJECT_LABEL__.tag), .; \ - .endif -#else -#define SUBSECTION_LBL(tag) \ - .ifndef __OBJECT_LABEL__.tag; \ - __OBJECT_LABEL__.tag:; \ - .endif -#endif - -#define UNLIKELY_START(cond, tag) \ - .Ldispatch.tag: \ - j##cond .Lunlikely.tag; \ - .subsection 1; \ - SUBSECTION_LBL(unlikely); \ - .Lunlikely.tag: - -#define UNLIKELY_DISPATCH_LABEL(tag) \ - .Ldispatch.tag - -#define UNLIKELY_DONE(cond, tag) \ - j##cond .Llikely.tag - -#define __UNLIKELY_END(tag) \ - .subsection 0; \ - .Llikely.tag: - -#define UNLIKELY_END(tag) \ - UNLIKELY_DONE(mp, tag); \ - __UNLIKELY_END(tag) - - .equ .Lrax, 0 - .equ .Lrcx, 1 - .equ .Lrdx, 2 - .equ .Lrbx, 3 - .equ .Lrsp, 4 - .equ .Lrbp, 5 - .equ .Lrsi, 6 - .equ .Lrdi, 7 - .equ .Lr8, 8 - .equ .Lr9, 9 - .equ .Lr10, 10 - .equ .Lr11, 11 - .equ .Lr12, 12 - .equ .Lr13, 13 - .equ .Lr14, 14 - .equ .Lr15, 15 - -#define STACK_CPUINFO_FIELD(field) (1 - CPUINFO_sizeof + CPUINFO_##field) -#define GET_STACK_END(reg) \ - .if .Lr##reg >= 8; \ - movq $STACK_SIZE-1, %r##reg; \ - .else; \ - movl $STACK_SIZE-1, %e##reg; \ - .endif; \ - orq %rsp, %r##reg - -#define GET_CPUINFO_FIELD(field, reg) \ - GET_STACK_END(reg); \ - addq $STACK_CPUINFO_FIELD(field), %r##reg - -#define GET_CURRENT(reg) \ - GET_STACK_END(reg); \ - movq STACK_CPUINFO_FIELD(current_vcpu)(%r##reg), %r##reg - -#ifndef NDEBUG -#define ASSERT_NOT_IN_ATOMIC \ - sti; /* sometimes called with interrupts disabled: safe to enable */ \ - call ASSERT_NOT_IN_ATOMIC -#else -#define ASSERT_NOT_IN_ATOMIC -#endif - -#define CPUINFO_FEATURE_OFFSET(feature) \ - (CPUINFO_features + (cpufeat_word(feature) * 4)) - -#else - -#ifdef HAVE_AS_QUOTED_SYM -#define SUBSECTION_LBL(tag) \ - ".ifndef .L." #tag "\n\t" \ - ".equ .L." #tag ", 1\n\t" \ - ".equ \"" __stringify(__OBJECT_LABEL__) "." #tag "\", .\n\t" \ - ".endif" -#else -#define SUBSECTION_LBL(tag) \ - ".ifndef " __stringify(__OBJECT_LABEL__) "." #tag "\n\t" \ - __stringify(__OBJECT_LABEL__) "." 
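GET_STACK_END relies on Xen's per-CPU stacks being STACK_SIZE-aligned: OR-ing any in-stack pointer with STACK_SIZE - 1 yields the address of the stack's last byte, from which STACK_CPUINFO_FIELD() reaches the struct cpu_info at the stack top via fixed negative offsets. An illustrative C rendition:

    unsigned long sp = current_stack_pointer;          /* any in-stack address */
    unsigned long stack_end = sp | (STACK_SIZE - 1);   /* last byte of stack */

    /* STACK_CPUINFO_FIELD(field) == 1 - CPUINFO_sizeof + CPUINFO_field, so: */
    struct cpu_info *info =
        (struct cpu_info *)(stack_end + 1 - sizeof(struct cpu_info));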
#tag ":\n\t" \ - ".endif" -#endif - -#ifdef __clang__ /* clang's builtin assember can't do .subsection */ - -#define UNLIKELY_START_SECTION ".pushsection .text.unlikely,\"ax\"" -#define UNLIKELY_END_SECTION ".popsection" - -#else - -#define UNLIKELY_START_SECTION ".subsection 1" -#define UNLIKELY_END_SECTION ".subsection 0" - -#endif - -#define UNLIKELY_START(cond, tag) \ - "j" #cond " .Lunlikely." #tag ".%=;\n\t" \ - UNLIKELY_START_SECTION "\n\t" \ - SUBSECTION_LBL(unlikely) "\n" \ - ".Lunlikely." #tag ".%=:" - -#define UNLIKELY_END(tag) \ - "jmp .Llikely." #tag ".%=;\n\t" \ - UNLIKELY_END_SECTION "\n" \ - ".Llikely." #tag ".%=:" - -static always_inline void clac(void) -{ - /* Note: a barrier is implicit in alternative() */ - alternative("", "clac", X86_FEATURE_XEN_SMAP); -} - -static always_inline void stac(void) -{ - /* Note: a barrier is implicit in alternative() */ - alternative("", "stac", X86_FEATURE_XEN_SMAP); -} -#endif - -#ifdef __ASSEMBLY__ -.macro SAVE_ALL compat=0 - addq $-(UREGS_error_code-UREGS_r15), %rsp - cld - movq %rdi,UREGS_rdi(%rsp) - xor %edi, %edi - movq %rsi,UREGS_rsi(%rsp) - xor %esi, %esi - movq %rdx,UREGS_rdx(%rsp) - xor %edx, %edx - movq %rcx,UREGS_rcx(%rsp) - xor %ecx, %ecx - movq %rax,UREGS_rax(%rsp) - xor %eax, %eax -.if !\compat - movq %r8,UREGS_r8(%rsp) - movq %r9,UREGS_r9(%rsp) - movq %r10,UREGS_r10(%rsp) - movq %r11,UREGS_r11(%rsp) -.endif - xor %r8d, %r8d - xor %r9d, %r9d - xor %r10d, %r10d - xor %r11d, %r11d - movq %rbx,UREGS_rbx(%rsp) - xor %ebx, %ebx - movq %rbp,UREGS_rbp(%rsp) -#ifdef CONFIG_FRAME_POINTER -/* Indicate special exception stack frame by inverting the frame pointer. */ - leaq UREGS_rbp(%rsp), %rbp - notq %rbp -#else - xor %ebp, %ebp -#endif -.if !\compat - movq %r12,UREGS_r12(%rsp) - movq %r13,UREGS_r13(%rsp) - movq %r14,UREGS_r14(%rsp) - movq %r15,UREGS_r15(%rsp) -.endif - xor %r12d, %r12d - xor %r13d, %r13d - xor %r14d, %r14d - xor %r15d, %r15d -.endm - -#define LOAD_ONE_REG(reg, compat) \ -.if !(compat); \ - movq UREGS_r##reg(%rsp),%r##reg; \ -.else; \ - movl UREGS_r##reg(%rsp),%e##reg; \ -.endif - -/* - * Restore all previously saved registers. - * - * @adj: extra stack pointer adjustment to be folded into the adjustment done - * anyway at the end of the macro - * @compat: R8-R15 don't need reloading, but they are clobbered for added - * safety against information leaks. 
- */ -.macro RESTORE_ALL adj=0 compat=0 -.if !\compat - movq UREGS_r15(%rsp), %r15 - movq UREGS_r14(%rsp), %r14 - movq UREGS_r13(%rsp), %r13 - movq UREGS_r12(%rsp), %r12 -.else - xor %r15d, %r15d - xor %r14d, %r14d - xor %r13d, %r13d - xor %r12d, %r12d -.endif - LOAD_ONE_REG(bp, \compat) - LOAD_ONE_REG(bx, \compat) -.if !\compat - movq UREGS_r11(%rsp),%r11 - movq UREGS_r10(%rsp),%r10 - movq UREGS_r9(%rsp),%r9 - movq UREGS_r8(%rsp),%r8 -.else - xor %r11d, %r11d - xor %r10d, %r10d - xor %r9d, %r9d - xor %r8d, %r8d -.endif - LOAD_ONE_REG(ax, \compat) - LOAD_ONE_REG(cx, \compat) - LOAD_ONE_REG(dx, \compat) - LOAD_ONE_REG(si, \compat) - LOAD_ONE_REG(di, \compat) - subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp -.endm - -#ifdef CONFIG_PV32 -#define CR4_PV32_RESTORE \ - ALTERNATIVE_2 "", \ - "call cr4_pv32_restore", X86_FEATURE_XEN_SMEP, \ - "call cr4_pv32_restore", X86_FEATURE_XEN_SMAP -#else -#define CR4_PV32_RESTORE -#endif - -#include - -#endif - -/* Work around AMD erratum #88 */ -#define safe_swapgs \ - "mfence; swapgs;" - -#ifdef __sun__ -#define REX64_PREFIX "rex64\\" -#elif defined(__clang__) -#define REX64_PREFIX ".byte 0x48; " -#else -#define REX64_PREFIX "rex64/" -#endif - -#define ELFNOTE(name, type, desc) \ - .pushsection .note.name, "a", @note ; \ - .p2align 2 ; \ - .long 2f - 1f /* namesz */ ; \ - .long 4f - 3f /* descsz */ ; \ - .long type /* type */ ; \ -1: .asciz #name /* name */ ; \ -2: .p2align 2 ; \ -3: desc /* desc */ ; \ -4: .p2align 2 ; \ - .popsection - -#define ASM_INT(label, val) \ - .p2align 2; \ -label: .long (val); \ - .size label, . - label; \ - .type label, @object - -#define ASM_CONSTANT(name, value) \ - asm ( ".equ " #name ", %P0; .global " #name \ - :: "i" ((value)) ); -#endif /* __X86_ASM_DEFNS_H__ */ diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h deleted file mode 100644 index 27aad43aaa..0000000000 --- a/xen/include/asm-x86/atomic.h +++ /dev/null @@ -1,239 +0,0 @@ -#ifndef __ARCH_X86_ATOMIC__ -#define __ARCH_X86_ATOMIC__ - -#include -#include - -#define build_read_atomic(name, size, type, reg) \ -static inline type name(const volatile type *addr) \ -{ \ - type ret; \ - asm volatile ( "mov" size " %1,%0" : reg (ret) : "m" (*addr) ); \ - return ret; \ -} - -#define build_write_atomic(name, size, type, reg) \ -static inline void name(volatile type *addr, type val) \ -{ \ - asm volatile ( "mov" size " %1,%0" : "=m" (*addr) : reg (val) ); \ -} - -#define build_add_sized(name, size, type, reg) \ - static inline void name(volatile type *addr, type val) \ - { \ - asm volatile("add" size " %1,%0" \ - : "=m" (*addr) \ - : reg (val)); \ - } - -build_read_atomic(read_u8_atomic, "b", uint8_t, "=q") -build_read_atomic(read_u16_atomic, "w", uint16_t, "=r") -build_read_atomic(read_u32_atomic, "l", uint32_t, "=r") -build_read_atomic(read_u64_atomic, "q", uint64_t, "=r") - -build_write_atomic(write_u8_atomic, "b", uint8_t, "q") -build_write_atomic(write_u16_atomic, "w", uint16_t, "r") -build_write_atomic(write_u32_atomic, "l", uint32_t, "r") -build_write_atomic(write_u64_atomic, "q", uint64_t, "r") - -build_add_sized(add_u8_sized, "b", uint8_t, "qi") -build_add_sized(add_u16_sized, "w", uint16_t, "ri") -build_add_sized(add_u32_sized, "l", uint32_t, "ri") -build_add_sized(add_u64_sized, "q", uint64_t, "ri") - -#undef build_read_atomic -#undef build_write_atomic -#undef build_add_sized - -void __bad_atomic_size(void); - -#define read_atomic(p) ({ \ - unsigned long x_; \ - CLANG_DISABLE_WARN_GCC_COMPAT_START \ - switch ( sizeof(*(p)) ) { \ - case 1: x_ 
= read_u8_atomic((uint8_t *)(p)); break; \ - case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \ - case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \ - case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \ - default: x_ = 0; __bad_atomic_size(); break; \ - } \ - CLANG_DISABLE_WARN_GCC_COMPAT_END \ - (typeof(*(p)))x_; \ -}) - -#define write_atomic(p, x) ({ \ - typeof(*(p)) __x = (x); \ - /* Check that the pointer is not a const type */ \ - void *__maybe_unused p_ = &__x; \ - unsigned long x_ = (unsigned long)__x; \ - switch ( sizeof(*(p)) ) { \ - case 1: write_u8_atomic((uint8_t *)(p), x_); break; \ - case 2: write_u16_atomic((uint16_t *)(p), x_); break; \ - case 4: write_u32_atomic((uint32_t *)(p), x_); break; \ - case 8: write_u64_atomic((uint64_t *)(p), x_); break; \ - default: __bad_atomic_size(); break; \ - } \ -}) - -#define add_sized(p, x) ({ \ - typeof(*(p)) x_ = (x); \ - switch ( sizeof(*(p)) ) \ - { \ - case 1: add_u8_sized((uint8_t *)(p), x_); break; \ - case 2: add_u16_sized((uint16_t *)(p), x_); break; \ - case 4: add_u32_sized((uint32_t *)(p), x_); break; \ - case 8: add_u64_sized((uint64_t *)(p), x_); break; \ - default: __bad_atomic_size(); break; \ - } \ -}) - -static inline int atomic_read(const atomic_t *v) -{ - return read_atomic(&v->counter); -} - -static inline int _atomic_read(atomic_t v) -{ - return v.counter; -} - -static inline void atomic_set(atomic_t *v, int i) -{ - write_atomic(&v->counter, i); -} - -static inline void _atomic_set(atomic_t *v, int i) -{ - v->counter = i; -} - -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - return cmpxchg(&v->counter, old, new); -} - -static inline void atomic_add(int i, atomic_t *v) -{ - asm volatile ( - "lock; addl %1,%0" - : "=m" (*(volatile int *)&v->counter) - : "ir" (i), "m" (*(volatile int *)&v->counter) ); -} - -static inline int atomic_add_return(int i, atomic_t *v) -{ - return i + arch_fetch_and_add(&v->counter, i); -} - -static inline void atomic_sub(int i, atomic_t *v) -{ - asm volatile ( - "lock; subl %1,%0" - : "=m" (*(volatile int *)&v->counter) - : "ir" (i), "m" (*(volatile int *)&v->counter) ); -} - -static inline int atomic_sub_return(int i, atomic_t *v) -{ - return arch_fetch_and_add(&v->counter, -i) - i; -} - -static inline int atomic_sub_and_test(int i, atomic_t *v) -{ - bool c; - - asm volatile ( "lock; subl %[i], %[counter]\n\t" - ASM_FLAG_OUT(, "setz %[zf]\n\t") - : [counter] "+m" (*(volatile int *)&v->counter), - [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) - : [i] "ir" (i) : "memory" ); - - return c; -} - -static inline void atomic_inc(atomic_t *v) -{ - asm volatile ( - "lock; incl %0" - : "=m" (*(volatile int *)&v->counter) - : "m" (*(volatile int *)&v->counter) ); -} - -static inline int atomic_inc_return(atomic_t *v) -{ - return atomic_add_return(1, v); -} - -static inline int atomic_inc_and_test(atomic_t *v) -{ - bool c; - - asm volatile ( "lock; incl %[counter]\n\t" - ASM_FLAG_OUT(, "setz %[zf]\n\t") - : [counter] "+m" (*(volatile int *)&v->counter), - [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) - :: "memory" ); - - return c; -} - -static inline void atomic_dec(atomic_t *v) -{ - asm volatile ( - "lock; decl %0" - : "=m" (*(volatile int *)&v->counter) - : "m" (*(volatile int *)&v->counter) ); -} - -static inline int atomic_dec_return(atomic_t *v) -{ - return atomic_sub_return(1, v); -} - -static inline int atomic_dec_and_test(atomic_t *v) -{ - bool c; - - asm volatile ( "lock; decl %[counter]\n\t" - ASM_FLAG_OUT(, "setz %[zf]\n\t") - : [counter] "+m" (*(volatile int 
*)&v->counter), - [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c) - :: "memory" ); - - return c; -} - -static inline int atomic_add_negative(int i, atomic_t *v) -{ - bool c; - - asm volatile ( "lock; addl %[i], %[counter]\n\t" - ASM_FLAG_OUT(, "sets %[sf]\n\t") - : [counter] "+m" (*(volatile int *)&v->counter), - [sf] ASM_FLAG_OUT("=@ccs", "=qm") (c) - : [i] "ir" (i) : "memory" ); - - return c; -} - -static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) - c = old; - return c; -} - -static inline void atomic_and(int m, atomic_t *v) -{ - asm volatile ( - "lock andl %1, %0" - : "+m" (*(volatile int *)&v->counter) - : "ir" (m) ); -} - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -#endif /* __ARCH_X86_ATOMIC__ */ diff --git a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h deleted file mode 100644 index 5a71afbc89..0000000000 --- a/xen/include/asm-x86/bitops.h +++ /dev/null @@ -1,483 +0,0 @@ -#ifndef _X86_BITOPS_H -#define _X86_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. - */ - -#include -#include - -/* - * We specify the memory operand as both input and output because the memory - * operand is both read from and written to. Since the operand is in fact a - * word array, we also specify "memory" in the clobbers list to indicate that - * words other than the one directly addressed by the memory operand may be - * modified. - */ - -#define ADDR (*(volatile int *) addr) -#define CONST_ADDR (*(const volatile int *) addr) - -extern void __bitop_bad_size(void); -#define bitop_bad_size(addr) (sizeof(*(addr)) < 4) - -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(int nr, volatile void *addr) -{ - asm volatile ( "lock; btsl %1,%0" - : "+m" (ADDR) : "Ir" (nr) : "memory"); -} -#define set_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - set_bit(nr, addr); \ -}) - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void variable_set_bit(int nr, void *addr) -{ - asm volatile ( "btsl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" ); -} -static inline void constant_set_bit(int nr, void *addr) -{ - ((unsigned int *)addr)[nr >> 5] |= (1u << (nr & 31)); -} -#define __set_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __builtin_constant_p(nr) ? \ - constant_set_bit(nr, addr) : \ - variable_set_bit(nr, addr); \ -}) - -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. 
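 *
 * Minimal usage sketch of this pair (names are illustrative only, not
 * Xen code):
 *     static unsigned long pending;      must be at least 4 bytes wide
 *     set_bit(nr, &pending);             locked bts
 *     clear_bit(nr, &pending);           locked btr
 * The bitop_bad_size() wrappers reject operands narrower than 4 bytes,
 * since the underlying 32-bit btsl/btrl would touch adjacent storage.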
- */ -static inline void clear_bit(int nr, volatile void *addr) -{ - asm volatile ( "lock; btrl %1,%0" - : "+m" (ADDR) : "Ir" (nr) : "memory"); -} -#define clear_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - clear_bit(nr, addr); \ -}) - -/** - * __clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * Unlike clear_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void variable_clear_bit(int nr, void *addr) -{ - asm volatile ( "btrl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" ); -} -static inline void constant_clear_bit(int nr, void *addr) -{ - ((unsigned int *)addr)[nr >> 5] &= ~(1u << (nr & 31)); -} -#define __clear_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __builtin_constant_p(nr) ? \ - constant_clear_bit(nr, addr) : \ - variable_clear_bit(nr, addr); \ -}) - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void variable_change_bit(int nr, void *addr) -{ - asm volatile ( "btcl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" ); -} -static inline void constant_change_bit(int nr, void *addr) -{ - ((unsigned int *)addr)[nr >> 5] ^= (1u << (nr & 31)); -} -#define __change_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __builtin_constant_p(nr) ? \ - constant_change_bit(nr, addr) : \ - variable_change_bit(nr, addr); \ -}) - -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(int nr, volatile void *addr) -{ - asm volatile ( "lock; btcl %1,%0" - : "+m" (ADDR) : "Ir" (nr) : "memory"); -} -#define change_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - change_bit(nr, addr); \ -}) - -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_set_bit(int nr, volatile void *addr) -{ - int oldbit; - - asm volatile ( "lock; btsl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define test_and_set_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - test_and_set_bit(nr, addr); \ -}) - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. 
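 *
 * Illustrative contrast, assuming a hypothetical "flags" word and a
 * caller-provided "flags_lock" (sketch only):
 *     if ( !test_and_set_bit(nr, &flags) )      locked; cross-CPU safe
 *         ...
 * versus
 *     spin_lock(&flags_lock);
 *     if ( !__test_and_set_bit(nr, &flags) )    cheaper, needs the lock
 *         ...
 *     spin_unlock(&flags_lock);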
- */ -static inline int __test_and_set_bit(int nr, void *addr) -{ - int oldbit; - - asm volatile ( "btsl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define __test_and_set_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __test_and_set_bit(nr, addr); \ -}) - -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_clear_bit(int nr, volatile void *addr) -{ - int oldbit; - - asm volatile ( "lock; btrl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define test_and_clear_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - test_and_clear_bit(nr, addr); \ -}) - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, void *addr) -{ - int oldbit; - - asm volatile ( "btrl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define __test_and_clear_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __test_and_clear_bit(nr, addr); \ -}) - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, void *addr) -{ - int oldbit; - - asm volatile ( "btcl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (*(int *)addr) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define __test_and_change_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __test_and_change_bit(nr, addr); \ -}) - -/** - * test_and_change_bit - Change a bit and return its new value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. 
- */ -static inline int test_and_change_bit(int nr, volatile void *addr) -{ - int oldbit; - - asm volatile ( "lock; btcl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit), - [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} -#define test_and_change_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - test_and_change_bit(nr, addr); \ -}) - -static inline int constant_test_bit(int nr, const volatile void *addr) -{ - return ((1U << (nr & 31)) & - (((const volatile unsigned int *)addr)[nr >> 5])) != 0; -} - -static inline int variable_test_bit(int nr, const volatile void *addr) -{ - int oldbit; - - asm volatile ( "btl %[nr], %[addr]\n\t" - ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t") - : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit) - : [addr] "m" (CONST_ADDR), [nr] "Ir" (nr) : "memory" ); - - return oldbit; -} - -#define test_bit(nr, addr) ({ \ - if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ - __builtin_constant_p(nr) ? \ - constant_test_bit(nr, addr) : \ - variable_test_bit(nr, addr); \ -}) - -extern unsigned int __find_first_bit( - const unsigned long *addr, unsigned int size); -extern unsigned int __find_next_bit( - const unsigned long *addr, unsigned int size, unsigned int offset); -extern unsigned int __find_first_zero_bit( - const unsigned long *addr, unsigned int size); -extern unsigned int __find_next_zero_bit( - const unsigned long *addr, unsigned int size, unsigned int offset); - -static always_inline unsigned int __scanbit(unsigned long val, unsigned int max) -{ - if ( __builtin_constant_p(max) && max == BITS_PER_LONG ) - alternative_io("bsf %[in],%[out]; cmovz %[max],%k[out]", - "rep; bsf %[in],%[out]", - X86_FEATURE_BMI1, - [out] "=&r" (val), - [in] "r" (val), [max] "r" (max)); - else - asm ( "bsf %1,%0 ; cmovz %2,%k0" - : "=&r" (val) : "r" (val), "r" (max) ); - return (unsigned int)val; -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) find_next_bit(addr, size, 0) - -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -#define find_next_bit(addr, size, off) ({ \ - unsigned int r__; \ - const unsigned long *a__ = (addr); \ - unsigned int s__ = (size); \ - unsigned int o__ = (off); \ - if ( o__ >= s__ ) \ - r__ = s__; \ - else if ( __builtin_constant_p(size) && s__ <= BITS_PER_LONG ) \ - r__ = o__ + __scanbit(*(const unsigned long *)(a__) >> o__, s__); \ - else if ( __builtin_constant_p(off) && !o__ ) \ - r__ = __find_first_bit(a__, s__); \ - else \ - r__ = __find_next_bit(a__, s__, o__); \ - r__; \ -}) - -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first zero bit, not the number of the byte - * containing a bit. 
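 *
 * A typical use, assuming a hypothetical "map" in which a set bit means
 * "slot in use" (sketch only):
 *     slot = find_first_zero_bit(map, nr_slots);
 *     if ( slot < nr_slots )
 *         __set_bit(slot, map);
 * The scan returns "size" when no zero bit exists, so the bounds check
 * doubles as the map-full test; the non-atomic __set_bit() assumes the
 * caller already holds whatever lock covers "map".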
- */ -#define find_first_zero_bit(addr, size) find_next_zero_bit(addr, size, 0) - -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -#define find_next_zero_bit(addr, size, off) ({ \ - unsigned int r__; \ - const unsigned long *a__ = (addr); \ - unsigned int s__ = (size); \ - unsigned int o__ = (off); \ - if ( o__ >= s__ ) \ - r__ = s__; \ - else if ( __builtin_constant_p(size) && s__ <= BITS_PER_LONG ) \ - r__ = o__ + __scanbit(~*(const unsigned long *)(a__) >> o__, s__); \ - else if ( __builtin_constant_p(off) && !o__ ) \ - r__ = __find_first_zero_bit(a__, s__); \ - else \ - r__ = __find_next_zero_bit(a__, s__, o__); \ - r__; \ -}) - -/** - * find_first_set_bit - find the first set bit in @word - * @word: the word to search - * - * Returns the bit-number of the first set bit. The input must *not* be zero. - */ -static inline unsigned int find_first_set_bit(unsigned long word) -{ - asm ( "rep; bsf %1,%0" : "=r" (word) : "rm" (word) ); - return (unsigned int)word; -} - -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as the libc and compiler builtin ffs routines. - */ -static inline int ffsl(unsigned long x) -{ - long r; - - asm ( "bsf %1,%0\n\t" - "jnz 1f\n\t" - "mov $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return (int)r+1; -} - -static inline int ffs(unsigned int x) -{ - int r; - - asm ( "bsf %1,%0\n\t" - "jnz 1f\n\t" - "mov $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r + 1; -} - -/** - * fls - find last bit set - * @x: the word to search - * - * This is defined the same way as ffs. - */ -static inline int flsl(unsigned long x) -{ - long r; - - asm ( "bsr %1,%0\n\t" - "jnz 1f\n\t" - "mov $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return (int)r+1; -} - -static inline int fls(unsigned int x) -{ - int r; - - asm ( "bsr %1,%0\n\t" - "jnz 1f\n\t" - "mov $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r + 1; -} - -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
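 *
 * Spot checks for this header's scan/weight helpers (illustrative):
 *     ffs(0)      == 0    (no bit found: the preloaded -1 yields 0)
 *     ffs(0x10)   == 5    (1-based index of the lowest set bit)
 *     fls(0x10)   == 5    (1-based index of the highest set bit)
 *     hweight32(0xf0f0f0f0) == 16
 * The hweightN() mappings below reuse the generic bit-twiddling
 * implementations, so no POPCNT support is assumed.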
- */ -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -#endif /* _X86_BITOPS_H */ diff --git a/xen/include/asm-x86/bug.h b/xen/include/asm-x86/bug.h deleted file mode 100644 index 9bb4a19420..0000000000 --- a/xen/include/asm-x86/bug.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef __X86_BUG_H__ -#define __X86_BUG_H__ - -#define BUG_DISP_WIDTH 24 -#define BUG_LINE_LO_WIDTH (31 - BUG_DISP_WIDTH) -#define BUG_LINE_HI_WIDTH (31 - BUG_DISP_WIDTH) - -#define BUGFRAME_run_fn 0 -#define BUGFRAME_warn 1 -#define BUGFRAME_bug 2 -#define BUGFRAME_assert 3 - -#define BUGFRAME_NR 4 - -#ifndef __ASSEMBLY__ - -struct bug_frame { - signed int loc_disp:BUG_DISP_WIDTH; - unsigned int line_hi:BUG_LINE_HI_WIDTH; - signed int ptr_disp:BUG_DISP_WIDTH; - unsigned int line_lo:BUG_LINE_LO_WIDTH; - signed int msg_disp[]; -}; - -#define bug_loc(b) ((const void *)(b) + (b)->loc_disp) -#define bug_ptr(b) ((const void *)(b) + (b)->ptr_disp) -#define bug_line(b) (((((b)->line_hi + ((b)->loc_disp < 0)) & \ - ((1 << BUG_LINE_HI_WIDTH) - 1)) << \ - BUG_LINE_LO_WIDTH) + \ - (((b)->line_lo + ((b)->ptr_disp < 0)) & \ - ((1 << BUG_LINE_LO_WIDTH) - 1))) -#define bug_msg(b) ((const char *)(b) + (b)->msg_disp[1]) - -#define _ASM_BUGFRAME_TEXT(second_frame) \ - ".Lbug%=: ud2\n" \ - ".pushsection .bug_frames.%c[bf_type], \"a\", @progbits\n" \ - ".p2align 2\n" \ - ".Lfrm%=:\n" \ - ".long (.Lbug%= - .Lfrm%=) + %c[bf_line_hi]\n" \ - ".long (%c[bf_ptr] - .Lfrm%=) + %c[bf_line_lo]\n" \ - ".if " #second_frame "\n" \ - ".long 0, %c[bf_msg] - .Lfrm%=\n" \ - ".endif\n" \ - ".popsection\n" \ - -#define _ASM_BUGFRAME_INFO(type, line, ptr, msg) \ - [bf_type] "i" (type), \ - [bf_ptr] "i" (ptr), \ - [bf_msg] "i" (msg), \ - [bf_line_lo] "i" ((line & ((1 << BUG_LINE_LO_WIDTH) - 1)) \ - << BUG_DISP_WIDTH), \ - [bf_line_hi] "i" (((line) >> BUG_LINE_LO_WIDTH) << BUG_DISP_WIDTH) - -#define BUG_FRAME(type, line, ptr, second_frame, msg) do { \ - BUILD_BUG_ON((line) >> (BUG_LINE_LO_WIDTH + BUG_LINE_HI_WIDTH)); \ - BUILD_BUG_ON((type) >= BUGFRAME_NR); \ - asm volatile ( _ASM_BUGFRAME_TEXT(second_frame) \ - :: _ASM_BUGFRAME_INFO(type, line, ptr, msg) ); \ -} while (0) - - -#define WARN() BUG_FRAME(BUGFRAME_warn, __LINE__, __FILE__, 0, NULL) -#define BUG() do { \ - BUG_FRAME(BUGFRAME_bug, __LINE__, __FILE__, 0, NULL); \ - unreachable(); \ -} while (0) - -#define run_in_exception_handler(fn) BUG_FRAME(BUGFRAME_run_fn, 0, fn, 0, NULL) - -#define assert_failed(msg) do { \ - BUG_FRAME(BUGFRAME_assert, __LINE__, __FILE__, 1, msg); \ - unreachable(); \ -} while (0) - -extern const struct bug_frame __start_bug_frames[], - __stop_bug_frames_0[], - __stop_bug_frames_1[], - __stop_bug_frames_2[], - __stop_bug_frames_3[]; - -#else /* !__ASSEMBLY__ */ - -/* - * Construct a bugframe, suitable for using in assembly code. Should always - * match the C version above. 
One complication is having to stash the strings - * in .rodata - */ - .macro BUG_FRAME type, line, file_str, second_frame, msg - - .if \type >= BUGFRAME_NR - .error "Invalid BUGFRAME index" - .endif - - .L\@ud: ud2a - - .pushsection .rodata.str1, "aMS", @progbits, 1 - .L\@s1: .asciz "\file_str" - .popsection - - .pushsection .bug_frames.\type, "a", @progbits - .p2align 2 - .L\@bf: - .long (.L\@ud - .L\@bf) + \ - ((\line >> BUG_LINE_LO_WIDTH) << BUG_DISP_WIDTH) - .long (.L\@s1 - .L\@bf) + \ - ((\line & ((1 << BUG_LINE_LO_WIDTH) - 1)) << BUG_DISP_WIDTH) - - .if \second_frame - .pushsection .rodata.str1, "aMS", @progbits, 1 - .L\@s2: .asciz "\msg" - .popsection - .long 0, (.L\@s2 - .L\@bf) - .endif - .popsection - .endm - -#define WARN BUG_FRAME BUGFRAME_warn, __LINE__, __FILE__, 0, 0 -#define BUG BUG_FRAME BUGFRAME_bug, __LINE__, __FILE__, 0, 0 - -#define ASSERT_FAILED(msg) \ - BUG_FRAME BUGFRAME_assert, __LINE__, __FILE__, 1, msg - -#endif /* !__ASSEMBLY__ */ - -#endif /* __X86_BUG_H__ */ diff --git a/xen/include/asm-x86/byteorder.h b/xen/include/asm-x86/byteorder.h deleted file mode 100644 index 1f77e502a5..0000000000 --- a/xen/include/asm-x86/byteorder.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __ASM_X86_BYTEORDER_H__ -#define __ASM_X86_BYTEORDER_H__ - -#include -#include - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) -{ - asm("bswap %0" : "=r" (x) : "0" (x)); - return x; -} - -static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) -{ - union { - struct { __u32 a,b; } s; - __u64 u; - } v; - v.u = val; - asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" - : "=r" (v.s.a), "=r" (v.s.b) - : "0" (v.s.a), "1" (v.s.b)); - return v.u; -} - -/* Do not define swab16. Gcc is smart enough to recognize "C" version and - convert it into rotation or exhange. 
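 *
 * Spot checks for the helpers above (illustrative):
 *     ___arch__swab32(0x12345678)            == 0x78563412
 *     ___arch__swab64(0x0123456789abcdefUL)  == 0xefcdab8967452301UL
 * The 64-bit variant bswaps each 32-bit half and exchanges them; on
 * x86-64 a single bswap of the full 64-bit register would be
 * equivalent.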
*/ - -#define __arch__swab64(x) ___arch__swab64(x) -#define __arch__swab32(x) ___arch__swab32(x) - -#define __BYTEORDER_HAS_U64__ - -#include - -#endif /* __ASM_X86_BYTEORDER_H__ */ diff --git a/xen/include/asm-x86/bzimage.h b/xen/include/asm-x86/bzimage.h deleted file mode 100644 index 7ed69d3910..0000000000 --- a/xen/include/asm-x86/bzimage.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __X86_BZIMAGE_H__ -#define __X86_BZIMAGE_H__ - -#include - -unsigned long bzimage_headroom(void *image_start, unsigned long image_length); - -int bzimage_parse(void *image_base, void **image_start, - unsigned long *image_len); - -#endif /* __X86_BZIMAGE_H__ */ diff --git a/xen/include/asm-x86/cache.h b/xen/include/asm-x86/cache.h deleted file mode 100644 index 1f7173d8c7..0000000000 --- a/xen/include/asm-x86/cache.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * include/asm-x86/cache.h - */ -#ifndef __ARCH_X86_CACHE_H -#define __ARCH_X86_CACHE_H - - -/* L1 cache line size */ -#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - -#define __read_mostly __section(".data.read_mostly") - -#endif diff --git a/xen/include/asm-x86/compat.h b/xen/include/asm-x86/compat.h deleted file mode 100644 index 818cad87db..0000000000 --- a/xen/include/asm-x86/compat.h +++ /dev/null @@ -1,20 +0,0 @@ -/****************************************************************************** - * compat.h - */ - -#ifdef CONFIG_COMPAT - -#define COMPAT_BITS_PER_LONG 32 - -typedef uint32_t compat_ptr_t; -typedef unsigned long full_ptr_t; - -#endif - -struct domain; -#ifdef CONFIG_PV32 -int switch_compat(struct domain *); -#else -#include -static inline int switch_compat(struct domain *d) { return -EOPNOTSUPP; } -#endif diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h deleted file mode 100644 index 883c2ef0df..0000000000 --- a/xen/include/asm-x86/config.h +++ /dev/null @@ -1,329 +0,0 @@ -/****************************************************************************** - * config.h - * - * A Linux-style configuration list. - */ - -#ifndef __X86_CONFIG_H__ -#define __X86_CONFIG_H__ - -#define LONG_BYTEORDER 3 -#define CONFIG_PAGING_LEVELS 4 - -#define BYTES_PER_LONG (1 << LONG_BYTEORDER) -#define BITS_PER_LONG (BYTES_PER_LONG << 3) -#define BITS_PER_BYTE 8 -#define POINTER_ALIGN BYTES_PER_LONG - -#define BITS_PER_LLONG 64 - -#define BITS_PER_XEN_ULONG BITS_PER_LONG - -#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 -#define CONFIG_DISCONTIGMEM 1 -#define CONFIG_NUMA_EMU 1 -#define CONFIG_DOMAIN_PAGE 1 - -#define CONFIG_PAGEALLOC_MAX_ORDER (2 * PAGETABLE_ORDER) -#define CONFIG_DOMU_MAX_ORDER PAGETABLE_ORDER -#define CONFIG_HWDOM_MAX_ORDER 12 - -/* Intel P4 currently has largest cache line (L2 line size is 128 bytes). 
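 * Hence the shift of 7 below: cache.h turns it into
 *     L1_CACHE_BYTES = 1 << L1_CACHE_SHIFT = 128
 * so cacheline-aligned data is padded for the worst observed line size
 * rather than the common 64 bytes.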
*/ -#define CONFIG_X86_L1_CACHE_SHIFT 7 - -#define CONFIG_ACPI_NUMA 1 -#define CONFIG_ACPI_SRAT 1 -#define CONFIG_ACPI_CSTATE 1 - -#define CONFIG_WATCHDOG 1 - -#define CONFIG_MULTIBOOT 1 - -#define HZ 100 - -#define OPT_CONSOLE_STR "vga" - -/* Linkage for x86 */ -#ifdef __ASSEMBLY__ -#define ALIGN .align 16,0x90 -#define ENTRY(name) \ - .globl name; \ - ALIGN; \ - name: -#define GLOBAL(name) \ - .globl name; \ - name: -#endif - -#define NR_hypercalls 64 - -#ifndef NDEBUG -#define MEMORY_GUARD -#endif - -#define STACK_ORDER 3 -#define STACK_SIZE (PAGE_SIZE << STACK_ORDER) - -#define IST_SHSTK_SIZE 1024 - -#define TRAMPOLINE_STACK_SPACE PAGE_SIZE -#define TRAMPOLINE_SPACE (KB(64) - TRAMPOLINE_STACK_SPACE) -#define WAKEUP_STACK_MIN 3072 - -#define MBI_SPACE_MIN (2 * PAGE_SIZE) - -/* Primary stack is restricted to 8kB by guard pages. */ -#define PRIMARY_STACK_SIZE 8192 - -/* Primary shadow stack is slot 5 of 8, immediately under the primary stack. */ -#define PRIMARY_SHSTK_SLOT 5 - -/* Total size of syscall and emulation stubs. */ -#define STUB_BUF_SHIFT (L1_CACHE_SHIFT > 7 ? L1_CACHE_SHIFT : 7) -#define STUB_BUF_SIZE (1 << STUB_BUF_SHIFT) -#define STUBS_PER_PAGE (PAGE_SIZE / STUB_BUF_SIZE) - -/* Return value for zero-size _xmalloc(), distinguished from NULL. */ -#define ZERO_BLOCK_PTR ((void *)0xBAD0BAD0BAD0BAD0UL) - -/* Override include/xen/list.h to make these non-canonical addresses. */ -#define LIST_POISON1 ((void *)0x0100100100100100UL) -#define LIST_POISON2 ((void *)0x0200200200200200UL) - -#ifndef __ASSEMBLY__ -extern unsigned long trampoline_phys; -#define bootsym_phys(sym) \ - (((unsigned long)&(sym)-(unsigned long)&trampoline_start)+trampoline_phys) -#define bootsym(sym) \ - (*((typeof(sym) *)__va(bootsym_phys(sym)))) - -extern char trampoline_start[], trampoline_end[]; -extern char trampoline_realmode_entry[]; -extern unsigned int trampoline_xen_phys_start; -extern unsigned char trampoline_cpu_started; -extern char wakeup_start[]; - -extern unsigned char video_flags; - -extern unsigned short boot_edid_caps; -extern unsigned char boot_edid_info[128]; -#endif - -#include - -#define PML4_ENTRY_BITS 39 -#define PML4_ENTRY_BYTES (_AC(1,UL) << PML4_ENTRY_BITS) -#define PML4_ADDR(_slot) \ - (((_AC(_slot, UL) >> 8) * _AC(0xffff000000000000,UL)) | \ - (_AC(_slot, UL) << PML4_ENTRY_BITS)) - -/* - * Memory layout: - * 0x0000000000000000 - 0x00007fffffffffff [128TB, 2^47 bytes, PML4:0-255] - * Guest-defined use (see below for compatibility mode guests). - * 0x0000800000000000 - 0xffff7fffffffffff [16EB] - * Inaccessible: current arch only supports 48-bit sign-extended VAs. - * 0xffff800000000000 - 0xffff803fffffffff [256GB, 2^38 bytes, PML4:256] - * Read-only machine-to-phys translation table (GUEST ACCESSIBLE). - * 0xffff804000000000 - 0xffff807fffffffff [256GB, 2^38 bytes, PML4:256] - * Reserved for future shared info with the guest OS (GUEST ACCESSIBLE). - * 0xffff808000000000 - 0xffff80ffffffffff [512GB, 2^39 bytes, PML4:257] - * ioremap for PCI mmconfig space - * 0xffff810000000000 - 0xffff817fffffffff [512GB, 2^39 bytes, PML4:258] - * Guest linear page table. - * 0xffff818000000000 - 0xffff81ffffffffff [512GB, 2^39 bytes, PML4:259] - * Shadow linear page table. - * 0xffff820000000000 - 0xffff827fffffffff [512GB, 2^39 bytes, PML4:260] - * Per-domain mappings (e.g., GDT, LDT). - * 0xffff828000000000 - 0xffff82bfffffffff [256GB, 2^38 bytes, PML4:261] - * Machine-to-phys translation table. 
- * 0xffff82c000000000 - 0xffff82cfffffffff [64GB, 2^36 bytes, PML4:261] - * vmap()/ioremap()/fixmap area. - * 0xffff82d000000000 - 0xffff82d03fffffff [1GB, 2^30 bytes, PML4:261] - * Compatibility machine-to-phys translation table (CONFIG_PV32). - * 0xffff82d040000000 - 0xffff82d07fffffff [1GB, 2^30 bytes, PML4:261] - * Xen text, static data, bss. -#ifndef CONFIG_BIGMEM - * 0xffff82d080000000 - 0xffff82dfffffffff [62GB, PML4:261] - * Reserved for future use. - * 0xffff82e000000000 - 0xffff82ffffffffff [128GB, 2^37 bytes, PML4:261] - * Page-frame information array. - * 0xffff830000000000 - 0xffff87ffffffffff [5TB, 5*2^40 bytes, PML4:262-271] - * 1:1 direct mapping of all physical memory. -#else - * 0xffff82d080000000 - 0xffff82ffffffffff [190GB, PML4:261] - * Reserved for future use. - * 0xffff830000000000 - 0xffff847fffffffff [1.5TB, 3*2^39 bytes, PML4:262-264] - * Page-frame information array. - * 0xffff848000000000 - 0xffff87ffffffffff [3.5TB, 7*2^39 bytes, PML4:265-271] - * 1:1 direct mapping of all physical memory. -#endif - * 0xffff880000000000 - 0xffffffffffffffff [120TB, PML4:272-511] - * PV: Guest-defined use. - * 0xffff880000000000 - 0xffffff7fffffffff [119.5TB, PML4:272-510] - * HVM/idle: continuation of 1:1 mapping - * 0xffffff8000000000 - 0xffffffffffffffff [512GB, 2^39 bytes PML4:511] - * HVM/idle: unused - * - * Compatibility guest area layout: - * 0x0000000000000000 - 0x00000000f57fffff [3928MB, PML4:0] - * Guest-defined use. - * 0x00000000f5800000 - 0x00000000ffffffff [168MB, PML4:0] - * Read-only machine-to-phys translation table (GUEST ACCESSIBLE). - * 0x0000000100000000 - 0x000001ffffffffff [2TB-4GB, PML4:0-3] - * Unused / Reserved for future use. - * 0x0000020000000000 - 0x0000027fffffffff [512GB, 2^39 bytes, PML4:4] - * Mirror of per-domain mappings (for argument translation area; also HVM). - * 0x0000028000000000 - 0x00007fffffffffff [125.5TB, PML4:5-255] - * Unused / Reserved for future use. - */ - - -#define ROOT_PAGETABLE_FIRST_XEN_SLOT 256 -#define ROOT_PAGETABLE_LAST_XEN_SLOT 271 -#define ROOT_PAGETABLE_XEN_SLOTS \ - (L4_PAGETABLE_ENTRIES - ROOT_PAGETABLE_FIRST_XEN_SLOT - 1) -#define ROOT_PAGETABLE_PV_XEN_SLOTS \ - (ROOT_PAGETABLE_LAST_XEN_SLOT - ROOT_PAGETABLE_FIRST_XEN_SLOT + 1) - -/* Hypervisor reserves PML4 slots 256 to 271 inclusive. */ -#define HYPERVISOR_VIRT_START (PML4_ADDR(256)) -#define HYPERVISOR_VIRT_END (HYPERVISOR_VIRT_START + PML4_ENTRY_BYTES*16) -/* Slot 256: read-only guest-accessible machine-to-phys translation table. */ -#define RO_MPT_VIRT_START (PML4_ADDR(256)) -#define MPT_VIRT_SIZE (PML4_ENTRY_BYTES / 2) -#define RO_MPT_VIRT_END (RO_MPT_VIRT_START + MPT_VIRT_SIZE) -/* Slot 257: ioremap for PCI mmconfig space for 2048 segments (512GB) - * - full 16-bit segment support needs 44 bits - * - since PML4 slot has 39 bits, we limit segments to 2048 (11-bits) - */ -#define PCI_MCFG_VIRT_START (PML4_ADDR(257)) -#define PCI_MCFG_VIRT_END (PCI_MCFG_VIRT_START + PML4_ENTRY_BYTES) -/* Slot 258: linear page table (guest table). */ -#define LINEAR_PT_VIRT_START (PML4_ADDR(258)) -#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES) -/* Slot 259: linear page table (shadow table). */ -#define SH_LINEAR_PT_VIRT_START (PML4_ADDR(259)) -#define SH_LINEAR_PT_VIRT_END (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES) -/* Slot 260: per-domain mappings (including map cache). 
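 * As a worked example of the PML4_ADDR() arithmetic used below:
 *     260 >> 8 = 1, so 0xffff000000000000 is OR'd in (canonical sign bits);
 *     260 << 39 = 0x0000820000000000;
 *     PML4_ADDR(260) = 0xffff820000000000,
 * matching the per-domain range in the layout table above. Slots below
 * 256 (e.g. PERDOMAIN_ALT_VIRT_START, slot 4) gain no sign bits and so
 * stay in the lower canonical half.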
*/ -#define PERDOMAIN_VIRT_START (PML4_ADDR(260)) -#define PERDOMAIN_SLOT_MBYTES (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER)) -#define PERDOMAIN_SLOTS 3 -#define PERDOMAIN_VIRT_SLOT(s) (PERDOMAIN_VIRT_START + (s) * \ - (PERDOMAIN_SLOT_MBYTES << 20)) -/* Slot 4: mirror of per-domain mappings (for compat xlat area accesses). */ -#define PERDOMAIN_ALT_VIRT_START PML4_ADDR(4) -/* Slot 261: machine-to-phys conversion table (256GB). */ -#define RDWR_MPT_VIRT_START (PML4_ADDR(261)) -#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + MPT_VIRT_SIZE) -/* Slot 261: vmap()/ioremap()/fixmap area (64GB). */ -#define VMAP_VIRT_START RDWR_MPT_VIRT_END -#define VMAP_VIRT_END (VMAP_VIRT_START + GB(64)) -/* Slot 261: compatibility machine-to-phys conversion table (1GB). */ -#define RDWR_COMPAT_MPT_VIRT_START VMAP_VIRT_END -#define RDWR_COMPAT_MPT_VIRT_END (RDWR_COMPAT_MPT_VIRT_START + GB(1)) -/* Slot 261: xen text, static data, bss, per-cpu stubs and executable fixmap (1GB). */ -#define XEN_VIRT_START RDWR_COMPAT_MPT_VIRT_END -#define XEN_VIRT_END (XEN_VIRT_START + GB(1)) - -#ifndef CONFIG_BIGMEM -/* Slot 261: page-frame information array (128GB). */ -#define FRAMETABLE_SIZE GB(128) -#else -/* Slot 262-264: page-frame information array (1.5TB). */ -#define FRAMETABLE_SIZE GB(1536) -#endif -#define FRAMETABLE_VIRT_END DIRECTMAP_VIRT_START -#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table)) -#define FRAMETABLE_VIRT_START (FRAMETABLE_VIRT_END - FRAMETABLE_SIZE) - -#ifndef CONFIG_BIGMEM -/* Slot 262-271/510: A direct 1:1 mapping of all of physical memory. */ -#define DIRECTMAP_VIRT_START (PML4_ADDR(262)) -#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES * (511 - 262)) -#else -/* Slot 265-271/510: A direct 1:1 mapping of all of physical memory. */ -#define DIRECTMAP_VIRT_START (PML4_ADDR(265)) -#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES * (511 - 265)) -#endif -#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE) - -#ifndef __ASSEMBLY__ - -#ifdef CONFIG_PV32 - -/* This is not a fixed value, just a lower limit. */ -#define __HYPERVISOR_COMPAT_VIRT_START 0xF5800000 -#define HYPERVISOR_COMPAT_VIRT_START(d) ((d)->arch.hv_compat_vstart) - -#else /* !CONFIG_PV32 */ - -#define HYPERVISOR_COMPAT_VIRT_START(d) ((void)(d), 0) - -#endif /* CONFIG_PV32 */ - -#define MACH2PHYS_COMPAT_VIRT_START HYPERVISOR_COMPAT_VIRT_START -#define MACH2PHYS_COMPAT_VIRT_END 0xFFE00000 -#define MACH2PHYS_COMPAT_NR_ENTRIES(d) \ - ((MACH2PHYS_COMPAT_VIRT_END-MACH2PHYS_COMPAT_VIRT_START(d))>>2) - -#define COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) \ - l2_table_offset(HYPERVISOR_COMPAT_VIRT_START(d)) -#define COMPAT_L2_PAGETABLE_LAST_XEN_SLOT l2_table_offset(~0U) -#define COMPAT_L2_PAGETABLE_XEN_SLOTS(d) \ - (COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) + 1) - -#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS -#define COMPAT_HAVE_PV_GUEST_ENTRY XEN_HAVE_PV_GUEST_ENTRY -#define COMPAT_HAVE_PV_UPCALL_MASK XEN_HAVE_PV_UPCALL_MASK - -#endif - -#define __HYPERVISOR_CS 0xe008 -#define __HYPERVISOR_DS64 0x0000 -#define __HYPERVISOR_DS32 0xe010 -#define __HYPERVISOR_DS __HYPERVISOR_DS64 - -#define SYMBOLS_ORIGIN XEN_VIRT_START - -/* For generic assembly code: use macros to define operation/operand sizes. */ -#define __OS "q" /* Operation Suffix */ -#define __OP "r" /* Operand Prefix */ - -#ifndef __ASSEMBLY__ -extern unsigned long xen_phys_start; -#endif - -/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. 
*/ -#define GDT_LDT_VCPU_SHIFT 5 -#define GDT_LDT_VCPU_VA_SHIFT (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT) -#define GDT_LDT_MBYTES PERDOMAIN_SLOT_MBYTES -#define MAX_VIRT_CPUS (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT)) -#define GDT_LDT_VIRT_START PERDOMAIN_VIRT_SLOT(0) -#define GDT_LDT_VIRT_END (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20)) - -/* The address of a particular VCPU's GDT or LDT. */ -#define GDT_VIRT_START(v) \ - (PERDOMAIN_VIRT_START + ((v)->vcpu_id << GDT_LDT_VCPU_VA_SHIFT)) -#define LDT_VIRT_START(v) \ - (GDT_VIRT_START(v) + (64*1024)) - -/* map_domain_page() map cache. The second per-domain-mapping sub-area. */ -#define MAPCACHE_VCPU_ENTRIES (CONFIG_PAGING_LEVELS * CONFIG_PAGING_LEVELS) -#define MAPCACHE_ENTRIES (MAX_VIRT_CPUS * MAPCACHE_VCPU_ENTRIES) -#define MAPCACHE_VIRT_START PERDOMAIN_VIRT_SLOT(1) -#define MAPCACHE_VIRT_END (MAPCACHE_VIRT_START + \ - MAPCACHE_ENTRIES * PAGE_SIZE) - -/* Argument translation area. The third per-domain-mapping sub-area. */ -#define ARG_XLAT_VIRT_START PERDOMAIN_VIRT_SLOT(2) -/* Allow for at least one guard page (COMPAT_ARG_XLAT_SIZE being 2 pages): */ -#define ARG_XLAT_VA_SHIFT (2 + PAGE_SHIFT) -#define ARG_XLAT_START(v) \ - (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT)) - -#define ELFSIZE 64 - -#define ARCH_CRASH_SAVE_VMCOREINFO - -#endif /* __X86_CONFIG_H__ */ diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h deleted file mode 100644 index 4754940e23..0000000000 --- a/xen/include/asm-x86/cpufeature.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * cpufeature.h - * - * Defines x86 CPU feature bits - */ -#ifndef __ASM_I386_CPUFEATURE_H -#define __ASM_I386_CPUFEATURE_H - -#include -#include - -#define cpufeat_word(idx) ((idx) / 32) -#define cpufeat_bit(idx) ((idx) % 32) -#define cpufeat_mask(idx) (_AC(1, U) << cpufeat_bit(idx)) - -/* An alias of a feature we know is always going to be present. 
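 * On 64-bit Xen that is Long Mode: X86_FEATURE_LM (aliased below) is
 * advertised by every CPU capable of running this code, making it a
 * safe anchor for ALTERNATIVE patch sites.
 *
 * For reference, the cpufeat_*() helpers above decompose a featureset
 * index; e.g. a hypothetical feature at index 38 sits in word 1, bit 6,
 * with cpufeat_mask(38) == 0x40.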
*/ -#define X86_FEATURE_ALWAYS X86_FEATURE_LM - -#ifndef __ASSEMBLY__ -#include - -#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) -#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) - -#define CPUID_PM_LEAF 6 -#define CPUID6_ECX_APERFMPERF_CAPABILITY 0x1 - -/* CPUID level 0x00000001.edx */ -#define cpu_has_fpu 1 -#define cpu_has_de 1 -#define cpu_has_pse 1 -#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) -#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) -#define cpu_has_mtrr 1 -#define cpu_has_pge 1 -#define cpu_has_pse36 boot_cpu_has(X86_FEATURE_PSE36) -#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) -#define cpu_has_mmx 1 -#define cpu_has_htt boot_cpu_has(X86_FEATURE_HTT) - -/* CPUID level 0x00000001.ecx */ -#define cpu_has_sse3 boot_cpu_has(X86_FEATURE_SSE3) -#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) -#define cpu_has_monitor boot_cpu_has(X86_FEATURE_MONITOR) -#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMX) -#define cpu_has_eist boot_cpu_has(X86_FEATURE_EIST) -#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) -#define cpu_has_fma boot_cpu_has(X86_FEATURE_FMA) -#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) -#define cpu_has_pdcm boot_cpu_has(X86_FEATURE_PDCM) -#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID) -#define cpu_has_sse4_1 boot_cpu_has(X86_FEATURE_SSE4_1) -#define cpu_has_sse4_2 boot_cpu_has(X86_FEATURE_SSE4_2) -#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) -#define cpu_has_popcnt boot_cpu_has(X86_FEATURE_POPCNT) -#define cpu_has_aesni boot_cpu_has(X86_FEATURE_AESNI) -#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) -#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) -#define cpu_has_f16c boot_cpu_has(X86_FEATURE_F16C) -#define cpu_has_rdrand boot_cpu_has(X86_FEATURE_RDRAND) -#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) - -/* CPUID level 0x80000001.edx */ -#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -#define cpu_has_page1gb boot_cpu_has(X86_FEATURE_PAGE1GB) -#define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP) -#define cpu_has_3dnow_ext boot_cpu_has(X86_FEATURE_3DNOWEXT) -#define cpu_has_3dnow boot_cpu_has(X86_FEATURE_3DNOW) - -/* CPUID level 0x80000001.ecx */ -#define cpu_has_cmp_legacy boot_cpu_has(X86_FEATURE_CMP_LEGACY) -#define cpu_has_svm boot_cpu_has(X86_FEATURE_SVM) -#define cpu_has_sse4a boot_cpu_has(X86_FEATURE_SSE4A) -#define cpu_has_xop boot_cpu_has(X86_FEATURE_XOP) -#define cpu_has_skinit boot_cpu_has(X86_FEATURE_SKINIT) -#define cpu_has_fma4 boot_cpu_has(X86_FEATURE_FMA4) -#define cpu_has_tbm boot_cpu_has(X86_FEATURE_TBM) - -/* CPUID level 0x0000000D:1.eax */ -#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) -#define cpu_has_xsavec boot_cpu_has(X86_FEATURE_XSAVEC) -#define cpu_has_xgetbv1 boot_cpu_has(X86_FEATURE_XGETBV1) -#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) - -/* CPUID level 0x00000007:0.ebx */ -#define cpu_has_bmi1 boot_cpu_has(X86_FEATURE_BMI1) -#define cpu_has_hle boot_cpu_has(X86_FEATURE_HLE) -#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) -#define cpu_has_smep boot_cpu_has(X86_FEATURE_SMEP) -#define cpu_has_bmi2 boot_cpu_has(X86_FEATURE_BMI2) -#define cpu_has_invpcid boot_cpu_has(X86_FEATURE_INVPCID) -#define cpu_has_rtm boot_cpu_has(X86_FEATURE_RTM) -#define cpu_has_pqe boot_cpu_has(X86_FEATURE_PQE) -#define cpu_has_fpu_sel (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL)) -#define cpu_has_mpx boot_cpu_has(X86_FEATURE_MPX) -#define cpu_has_avx512f boot_cpu_has(X86_FEATURE_AVX512F) -#define cpu_has_avx512dq 
boot_cpu_has(X86_FEATURE_AVX512DQ) -#define cpu_has_rdseed boot_cpu_has(X86_FEATURE_RDSEED) -#define cpu_has_smap boot_cpu_has(X86_FEATURE_SMAP) -#define cpu_has_avx512_ifma boot_cpu_has(X86_FEATURE_AVX512_IFMA) -#define cpu_has_clflushopt boot_cpu_has(X86_FEATURE_CLFLUSHOPT) -#define cpu_has_clwb boot_cpu_has(X86_FEATURE_CLWB) -#define cpu_has_avx512er boot_cpu_has(X86_FEATURE_AVX512ER) -#define cpu_has_avx512cd boot_cpu_has(X86_FEATURE_AVX512CD) -#define cpu_has_proc_trace boot_cpu_has(X86_FEATURE_PROC_TRACE) -#define cpu_has_sha boot_cpu_has(X86_FEATURE_SHA) -#define cpu_has_avx512bw boot_cpu_has(X86_FEATURE_AVX512BW) -#define cpu_has_avx512vl boot_cpu_has(X86_FEATURE_AVX512VL) - -/* CPUID level 0x00000007:0.ecx */ -#define cpu_has_avx512_vbmi boot_cpu_has(X86_FEATURE_AVX512_VBMI) -#define cpu_has_avx512_vbmi2 boot_cpu_has(X86_FEATURE_AVX512_VBMI2) -#define cpu_has_gfni boot_cpu_has(X86_FEATURE_GFNI) -#define cpu_has_vaes boot_cpu_has(X86_FEATURE_VAES) -#define cpu_has_vpclmulqdq boot_cpu_has(X86_FEATURE_VPCLMULQDQ) -#define cpu_has_avx512_vnni boot_cpu_has(X86_FEATURE_AVX512_VNNI) -#define cpu_has_avx512_bitalg boot_cpu_has(X86_FEATURE_AVX512_BITALG) -#define cpu_has_avx512_vpopcntdq boot_cpu_has(X86_FEATURE_AVX512_VPOPCNTDQ) -#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID) -#define cpu_has_movdiri boot_cpu_has(X86_FEATURE_MOVDIRI) -#define cpu_has_movdir64b boot_cpu_has(X86_FEATURE_MOVDIR64B) -#define cpu_has_enqcmd boot_cpu_has(X86_FEATURE_ENQCMD) - -/* CPUID level 0x80000007.edx */ -#define cpu_has_hw_pstate boot_cpu_has(X86_FEATURE_HW_PSTATE) -#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC) - -/* CPUID level 0x80000008.ebx */ -#define cpu_has_amd_ssbd boot_cpu_has(X86_FEATURE_AMD_SSBD) -#define cpu_has_virt_ssbd boot_cpu_has(X86_FEATURE_VIRT_SSBD) -#define cpu_has_ssb_no boot_cpu_has(X86_FEATURE_SSB_NO) - -/* CPUID level 0x00000007:0.edx */ -#define cpu_has_avx512_4vnniw boot_cpu_has(X86_FEATURE_AVX512_4VNNIW) -#define cpu_has_avx512_4fmaps boot_cpu_has(X86_FEATURE_AVX512_4FMAPS) -#define cpu_has_avx512_vp2intersect boot_cpu_has(X86_FEATURE_AVX512_VP2INTERSECT) -#define cpu_has_rtm_always_abort boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) -#define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) -#define cpu_has_serialize boot_cpu_has(X86_FEATURE_SERIALIZE) -#define cpu_has_arch_caps boot_cpu_has(X86_FEATURE_ARCH_CAPS) - -/* CPUID level 0x00000007:1.eax */ -#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI) -#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16) - -/* Synthesized. */ -#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) -#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING) -#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF) -#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH) -#define cpu_has_nscb boot_cpu_has(X86_FEATURE_NSCB) -#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR) -#define cpu_has_xen_shstk boot_cpu_has(X86_FEATURE_XEN_SHSTK) - -#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid) - -/* Bugs. 
*/ -#define cpu_bug_fpu_ptrs boot_cpu_has(X86_BUG_FPU_PTRS) -#define cpu_bug_null_seg boot_cpu_has(X86_BUG_NULL_SEG) - -enum _cache_type { - CACHE_TYPE_NULL = 0, - CACHE_TYPE_DATA = 1, - CACHE_TYPE_INST = 2, - CACHE_TYPE_UNIFIED = 3 -}; - -union _cpuid4_leaf_eax { - struct { - enum _cache_type type:5; - unsigned int level:3; - unsigned int is_self_initializing:1; - unsigned int is_fully_associative:1; - unsigned int reserved:4; - unsigned int num_threads_sharing:12; - unsigned int num_cores_on_die:6; - } split; - u32 full; -}; - -union _cpuid4_leaf_ebx { - struct { - unsigned int coherency_line_size:12; - unsigned int physical_line_partition:10; - unsigned int ways_of_associativity:10; - } split; - u32 full; -}; - -union _cpuid4_leaf_ecx { - struct { - unsigned int number_of_sets:32; - } split; - u32 full; -}; - -struct cpuid4_info { - union _cpuid4_leaf_eax eax; - union _cpuid4_leaf_ebx ebx; - union _cpuid4_leaf_ecx ecx; - unsigned long size; -}; - -int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf); -#endif /* !__ASSEMBLY__ */ - -#endif /* __ASM_I386_CPUFEATURE_H */ - -/* - * Local Variables: - * mode:c - * comment-column:42 - * End: - */ diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h deleted file mode 100644 index b10154fc44..0000000000 --- a/xen/include/asm-x86/cpufeatures.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Explicitly intended for multiple inclusion. - */ - -#include - -/* Number of capability words covered by the featureset words. */ -#define FSCAPINTS FEATURESET_NR_ENTRIES - -/* Synthetic words follow the featureset words. */ -#define X86_NR_SYNTH 1 -#define X86_SYNTH(x) (FSCAPINTS * 32 + (x)) - -/* Synthetic features */ -XEN_CPUFEATURE(CONSTANT_TSC, X86_SYNTH( 0)) /* TSC ticks at a constant rate */ -XEN_CPUFEATURE(NONSTOP_TSC, X86_SYNTH( 1)) /* TSC does not stop in C states */ -XEN_CPUFEATURE(ARAT, X86_SYNTH( 2)) /* Always running APIC timer */ -XEN_CPUFEATURE(ARCH_PERFMON, X86_SYNTH( 3)) /* Intel Architectural PerfMon */ -XEN_CPUFEATURE(TSC_RELIABLE, X86_SYNTH( 4)) /* TSC is known to be reliable */ -XEN_CPUFEATURE(XTOPOLOGY, X86_SYNTH( 5)) /* cpu topology enum extensions */ -XEN_CPUFEATURE(CPUID_FAULTING, X86_SYNTH( 6)) /* cpuid faulting */ -XEN_CPUFEATURE(CLFLUSH_MONITOR, X86_SYNTH( 7)) /* clflush reqd with monitor */ -XEN_CPUFEATURE(APERFMPERF, X86_SYNTH( 8)) /* APERFMPERF */ -XEN_CPUFEATURE(MFENCE_RDTSC, X86_SYNTH( 9)) /* MFENCE synchronizes RDTSC */ -XEN_CPUFEATURE(XEN_SMEP, X86_SYNTH(10)) /* SMEP gets used by Xen itself */ -XEN_CPUFEATURE(XEN_SMAP, X86_SYNTH(11)) /* SMAP gets used by Xen itself */ -/* Bit 12 - unused. 
*/ -XEN_CPUFEATURE(IND_THUNK_LFENCE, X86_SYNTH(13)) /* Use IND_THUNK_LFENCE */ -XEN_CPUFEATURE(IND_THUNK_JMP, X86_SYNTH(14)) /* Use IND_THUNK_JMP */ -XEN_CPUFEATURE(SC_NO_BRANCH_HARDEN, X86_SYNTH(15)) /* (Disable) Conditional branch hardening */ -XEN_CPUFEATURE(SC_MSR_PV, X86_SYNTH(16)) /* MSR_SPEC_CTRL used by Xen for PV */ -XEN_CPUFEATURE(SC_MSR_HVM, X86_SYNTH(17)) /* MSR_SPEC_CTRL used by Xen for HVM */ -XEN_CPUFEATURE(SC_RSB_PV, X86_SYNTH(18)) /* RSB overwrite needed for PV */ -XEN_CPUFEATURE(SC_RSB_HVM, X86_SYNTH(19)) /* RSB overwrite needed for HVM */ -XEN_CPUFEATURE(XEN_SELFSNOOP, X86_SYNTH(20)) /* SELFSNOOP gets used by Xen itself */ -XEN_CPUFEATURE(SC_MSR_IDLE, X86_SYNTH(21)) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */ -XEN_CPUFEATURE(XEN_LBR, X86_SYNTH(22)) /* Xen uses MSR_DEBUGCTL.LBR */ -XEN_CPUFEATURE(SC_VERW_PV, X86_SYNTH(23)) /* VERW used by Xen for PV */ -XEN_CPUFEATURE(SC_VERW_HVM, X86_SYNTH(24)) /* VERW used by Xen for HVM */ -XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */ -XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */ - -/* Bug words follow the synthetic words. */ -#define X86_NR_BUG 1 -#define X86_BUG(x) ((FSCAPINTS + X86_NR_SYNTH) * 32 + (x)) - -#define X86_BUG_FPU_PTRS X86_BUG( 0) /* (F)X{SAVE,RSTOR} doesn't save/restore FOP/FIP/FDP. */ -#define X86_BUG_NULL_SEG X86_BUG( 1) /* NULL-ing a selector preserves the base and limit. */ - -/* Total number of capability words, inc synth and bug words. */ -#define NCAPINTS (FSCAPINTS + X86_NR_SYNTH + X86_NR_BUG) /* N 32-bit words worth of info */ diff --git a/xen/include/asm-x86/cpufeatureset.h b/xen/include/asm-x86/cpufeatureset.h deleted file mode 100644 index f179229f19..0000000000 --- a/xen/include/asm-x86/cpufeatureset.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __XEN_X86_CPUFEATURESET_H__ -#define __XEN_X86_CPUFEATURESET_H__ - -#ifndef __ASSEMBLY__ - -#include - -#define XEN_CPUFEATURE(name, value) X86_FEATURE_##name = value, -enum { -#include -#include -}; -#undef XEN_CPUFEATURE - -#define XEN_CPUFEATURE(name, value) asm (".equ X86_FEATURE_" #name ", " \ - __stringify(value)); -#include -#include - -#else /* !__ASSEMBLY__ */ - -#define XEN_CPUFEATURE(name, value) .equ X86_FEATURE_##name, value -#include -#include - -#endif /* __ASSEMBLY__ */ - -#undef XEN_CPUFEATURE - -#endif /* !__XEN_X86_CPUFEATURESET_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h deleted file mode 100644 index 46904061d0..0000000000 --- a/xen/include/asm-x86/cpuid.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef __X86_CPUID_H__ -#define __X86_CPUID_H__ - -#include - -#ifndef __ASSEMBLY__ -#include -#include -#include - -#include -#include - -#include - -extern const uint32_t known_features[FSCAPINTS]; - -void init_guest_cpuid(void); - -/* - * Expected levelling capabilities (given cpuid vendor/family information), - * and levelling capabilities actually available (given MSR probing). 
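 *
 * levelling_caps is a bitmap of which masking/override MSRs were found;
 * e.g. leaf 1 ecx/edx levelling is usable only when both flags making
 * up LCAP_1cd below are present.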
- */ -#define LCAP_faulting XEN_SYSCTL_CPU_LEVELCAP_faulting -#define LCAP_1cd (XEN_SYSCTL_CPU_LEVELCAP_ecx | \ - XEN_SYSCTL_CPU_LEVELCAP_edx) -#define LCAP_e1cd (XEN_SYSCTL_CPU_LEVELCAP_extd_ecx | \ - XEN_SYSCTL_CPU_LEVELCAP_extd_edx) -#define LCAP_Da1 XEN_SYSCTL_CPU_LEVELCAP_xsave_eax -#define LCAP_6c XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx -#define LCAP_7ab0 (XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax | \ - XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx) -extern unsigned int expected_levelling_cap, levelling_caps; - -struct cpuidmasks -{ - uint64_t _1cd; - uint64_t e1cd; - uint64_t Da1; - uint64_t _6c; - uint64_t _7ab0; -}; - -/* Per CPU shadows of masking MSR values, for lazy context switching. */ -DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks); - -/* Default masking MSR values, calculated at boot. */ -extern struct cpuidmasks cpuidmask_defaults; - -extern struct cpuid_policy raw_cpuid_policy, host_cpuid_policy, - pv_max_cpuid_policy, pv_def_cpuid_policy, - hvm_max_cpuid_policy, hvm_def_cpuid_policy; - -extern const struct cpu_policy system_policies[]; - -/* Check that all previously present features are still available. */ -bool recheck_cpu_features(unsigned int cpu); - -/* Allocate and initialise a CPUID policy suitable for the domain. */ -int init_domain_cpuid_policy(struct domain *d); - -/* Clamp the CPUID policy to reality. */ -void recalculate_cpuid_policy(struct domain *d); - -struct vcpu; -void guest_cpuid(const struct vcpu *v, uint32_t leaf, - uint32_t subleaf, struct cpuid_leaf *res); - -#endif /* __ASSEMBLY__ */ -#endif /* !__X86_CPUID_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/cpuidle.h b/xen/include/asm-x86/cpuidle.h deleted file mode 100644 index 0981a8fd64..0000000000 --- a/xen/include/asm-x86/cpuidle.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __ASM_X86_CPUIDLE_H__ -#define __ASM_X86_CPUIDLE_H__ - -#include -#include -#include - -extern struct acpi_processor_power *processor_powers[]; - -extern void (*pm_idle_save)(void); - -bool lapic_timer_init(void); -extern void (*lapic_timer_off)(void); -extern void (*lapic_timer_on)(void); - -extern uint64_t (*cpuidle_get_tick)(void); - -int mwait_idle_init(struct notifier_block *); -int cpuidle_init_cpu(unsigned int cpu); -void default_dead_idle(void); -void acpi_dead_idle(void); -void play_dead(void); -void trace_exit_reason(u32 *irq_traced); -void update_idle_stats(struct acpi_processor_power *, - struct acpi_processor_cx *, uint64_t, uint64_t); -void update_last_cx_stat(struct acpi_processor_power *, - struct acpi_processor_cx *, uint64_t); - -bool errata_c6_workaround(void); - -#endif /* __X86_ASM_CPUIDLE_H__ */ diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h deleted file mode 100644 index a74ad4bc4c..0000000000 --- a/xen/include/asm-x86/current.h +++ /dev/null @@ -1,210 +0,0 @@ -/****************************************************************************** - * current.h - * - * Information structure that lives at the bottom of the per-cpu Xen stack. 
- */ - -#ifndef __X86_CURRENT_H__ -#define __X86_CURRENT_H__ - -#include -#include -#include - -/* - * Xen's cpu stacks are 8 pages (8-page aligned), arranged as: - * - * 7 - Primary stack (with a struct cpu_info at the top) - * 6 - Primary stack - * 5 - Primay Shadow Stack (read-only) - * 4 - #DF IST stack - * 3 - #DB IST stack - * 2 - NMI IST stack - * 1 - #MC IST stack - * 0 - IST Shadow Stacks (4x 1k, read-only) - */ - -/* - * Identify which stack page the stack pointer is on. Returns an index - * as per the comment above. - */ -static inline unsigned int get_stack_page(unsigned long sp) -{ - return (sp & (STACK_SIZE-1)) >> PAGE_SHIFT; -} - -struct vcpu; - -struct cpu_info { - struct cpu_user_regs guest_cpu_user_regs; - unsigned int processor_id; - unsigned int verw_sel; - struct vcpu *current_vcpu; - unsigned long per_cpu_offset; - unsigned long cr4; - /* - * Of the two following fields the latter is being set to the CR3 value - * to be used on the given pCPU for loading whenever 64-bit PV guest - * context is being entered. A value of zero indicates no setting of CR3 - * is to be performed. - * The former is the value to restore when re-entering Xen, if any. IOW - * its value being zero means there's nothing to restore. - */ - unsigned long xen_cr3; - unsigned long pv_cr3; - - /* See asm-x86/spec_ctrl_asm.h for usage. */ - unsigned int shadow_spec_ctrl; - uint8_t xen_spec_ctrl; - uint8_t spec_ctrl_flags; - - /* - * The following field controls copying of the L4 page table of 64-bit - * PV guests to the per-cpu root page table on entering the guest context. - * If set the L4 page table is being copied to the root page table and - * the field will be reset. - */ - bool root_pgt_changed; - - /* - * use_pv_cr3 is set in case the value of pv_cr3 is to be written into - * CR3 when returning from an interrupt. The main use is when returning - * from a NMI or MCE to hypervisor code where pv_cr3 was active. - */ - bool use_pv_cr3; - - unsigned long __pad; - /* get_stack_bottom() must be 16-byte aligned */ -}; - -static inline struct cpu_info *get_cpu_info_from_stack(unsigned long sp) -{ - return (struct cpu_info *)((sp | (STACK_SIZE - 1)) + 1) - 1; -} - -static inline struct cpu_info *get_cpu_info(void) -{ -#ifdef __clang__ - /* Clang complains that sp in the else case is not initialised. */ - unsigned long sp; - asm ( "mov %%rsp, %0" : "=r" (sp) ); -#else - register unsigned long sp asm("rsp"); -#endif - - return get_cpu_info_from_stack(sp); -} - -#define get_current() (get_cpu_info()->current_vcpu) -#define set_current(vcpu) (get_cpu_info()->current_vcpu = (vcpu)) -#define current (get_current()) - -#define get_processor_id() (get_cpu_info()->processor_id) -#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs) - -/* - * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points - * into the middle of cpu_info.guest_cpu_user_regs, at the section that - * precisely corresponds to a CPU trap frame. - */ -#define get_stack_bottom() \ - ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es) - -/* - * Get the reasonable stack bounds for stack traces and stack dumps. Stack - * dumps have a slightly larger range to include exception frames in the - * printed information. The returned word is inside the interesting range. 
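 *
 * A minimal sketch of the alignment arithmetic this header relies on,
 * taking get_cpu_info_from_stack() above as the example:
 *     end  = (sp | (STACK_SIZE - 1)) + 1;    round up to the stack end
 *     info = (struct cpu_info *)end - 1;     struct at the very top
 * This only works because cpu stacks are STACK_SIZE-aligned, as noted
 * in the layout comment at the top of this header.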
- */ -unsigned long get_stack_trace_bottom(unsigned long sp); -unsigned long get_stack_dump_bottom (unsigned long sp); - -#ifdef CONFIG_LIVEPATCH -# define CHECK_FOR_LIVEPATCH_WORK "call check_for_livepatch_work;" -#elif defined(CONFIG_DEBUG) -/* Mimic the clobbering effect a call has on registers. */ -# define CHECK_FOR_LIVEPATCH_WORK \ - "mov $0x1234567890abcdef, %%rax\n\t" \ - "mov %%rax, %%rcx; mov %%rax, %%rdx\n\t" \ - "mov %%rax, %%rsi; mov %%rax, %%rdi\n\t" \ - "mov %%rax, %%r8; mov %%rax, %%r9\n\t" \ - "mov %%rax, %%r10; mov %%rax, %%r11\n\t" -#else -# define CHECK_FOR_LIVEPATCH_WORK "" -#endif - -#ifdef CONFIG_XEN_SHSTK -/* - * We need to unwind the primary shadow stack to its supervisor token, located - * in the last word of the primary shadow stack. - * - * Read the shadow stack pointer, subtract it from supervisor token position, - * and divide by 8 to get the number of slots needing popping. - * - * INCSSPQ can't pop more than 255 entries. We shouldn't ever need to pop - * that many entries, and getting this wrong will cause us to #DF later. Turn - * it into a BUG() now for fractionally easier debugging. - */ -# define SHADOW_STACK_WORK \ - "mov $1, %[ssp];" \ - "rdsspd %[ssp];" \ - "cmp $1, %[ssp];" \ - "je .L_shstk_done.%=;" /* CET not active? Skip. */ \ - "mov $%c[skstk_base], %[val];" \ - "and $%c[stack_mask], %[ssp];" \ - "sub %[ssp], %[val];" \ - "shr $3, %[val];" \ - "cmp $255, %[val];" /* More than 255 entries? Crash. */ \ - UNLIKELY_START(a, shstk_adjust) \ - _ASM_BUGFRAME_TEXT(0) \ - UNLIKELY_END_SECTION ";" \ - "incsspq %q[val];" \ - ".L_shstk_done.%=:" -#else -# define SHADOW_STACK_WORK "" -#endif - -#if __GNUC__ >= 9 -# define ssaj_has_attr_noreturn(fn) __builtin_has_attribute(fn, __noreturn__) -#else -/* Simply can't check the property with older gcc. */ -# define ssaj_has_attr_noreturn(fn) true -#endif - -#define switch_stack_and_jump(fn, instr, constr) \ - ({ \ - unsigned int tmp; \ - (void)((fn) == (void (*)(void))NULL); \ - BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn)); \ - __asm__ __volatile__ ( \ - SHADOW_STACK_WORK \ - "mov %[stk], %%rsp;" \ - CHECK_FOR_LIVEPATCH_WORK \ - instr "[fun]" \ - : [val] "=&r" (tmp), \ - [ssp] "=&r" (tmp) \ - : [stk] "r" (guest_cpu_user_regs()), \ - [fun] constr (fn), \ - [skstk_base] "i" \ - ((PRIMARY_SHSTK_SLOT + 1) * PAGE_SIZE - 8), \ - [stack_mask] "i" (STACK_SIZE - 1), \ - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, \ - __FILE__, NULL) \ - : "memory" ); \ - unreachable(); \ - }) - -#define reset_stack_and_jump(fn) \ - switch_stack_and_jump(fn, "jmp %c", "i") - -/* The constraint may only specify non-call-clobbered registers. */ -#define reset_stack_and_jump_ind(fn) \ - switch_stack_and_jump(fn, "INDIRECT_JMP %", "b") - -/* - * Which VCPU's state is currently running on each CPU? - * This is not necesasrily the same as 'current' as a CPU may be - * executing a lazy state switch. - */ -DECLARE_PER_CPU(struct vcpu *, curr_vcpu); - -#endif /* __X86_CURRENT_H__ */ diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h deleted file mode 100644 index 99803bfd0c..0000000000 --- a/xen/include/asm-x86/debugger.h +++ /dev/null @@ -1,101 +0,0 @@ -/****************************************************************************** - * asm/debugger.h - * - * Generic hooks into arch-dependent Xen. - * - * Each debugger should define two functions here: - * - * 1. debugger_trap_entry(): - * Called at start of any synchronous fault or trap, before any other work - * is done. 
The idea is that if your debugger deliberately caused the trap - * (e.g. to implement breakpoints or data watchpoints) then you can take - * appropriate action and return a non-zero value to cause early exit from - * the trap function. - * - * 2. debugger_trap_fatal(): - * Called when Xen is about to give up and crash. Typically you will use this - * hook to drop into a debug session. It can also be used to hook off - * deliberately caused traps (which you then handle and return non-zero). - * - * 3. debugger_trap_immediate(): - * Called if we want to drop into a debugger now. This is essentially the - * same as debugger_trap_fatal, except that we use the current register state - * rather than the state which was in effect when we took the trap. - * For example: if we're dying because of an unhandled exception, we call - * debugger_trap_fatal; if we're dying because of a panic() we call - * debugger_trap_immediate(). - */ - -#ifndef __X86_DEBUGGER_H__ -#define __X86_DEBUGGER_H__ - -#include -#include -#include - -void domain_pause_for_debugger(void); - -#ifdef CONFIG_CRASH_DEBUG - -#include - -static inline bool debugger_trap_fatal( - unsigned int vector, struct cpu_user_regs *regs) -{ - int rc = __trap_to_gdb(regs, vector); - return ((rc == 0) || (vector == TRAP_int3)); -} - -/* Int3 is a trivial way to gather cpu_user_regs context. */ -#define debugger_trap_immediate() __asm__ __volatile__ ( "int3" ); - -static inline bool debugger_trap_entry( - unsigned int vector, struct cpu_user_regs *regs) -{ - /* - * This function is called before any checks are made. Amongst other - * things, be aware that during early boot, current is not a safe pointer - * to follow. - */ - struct vcpu *v = current; - - if ( vector != TRAP_int3 && vector != TRAP_debug ) - return false; - - if ( guest_mode(regs) && guest_kernel_mode(v, regs) && - v->domain->debugger_attached ) - { - if ( vector != TRAP_debug ) /* domain pause is good enough */ - current->arch.gdbsx_vcpu_event = vector; - domain_pause_for_debugger(); - return true; - } - - return false; -} - -#else - -static inline bool debugger_trap_fatal( - unsigned int vector, struct cpu_user_regs *regs) -{ - return false; -} - -#define debugger_trap_immediate() ((void)0) - -static inline bool debugger_trap_entry( - unsigned int vector, struct cpu_user_regs *regs) -{ - return false; -} - -#endif - -#ifdef CONFIG_GDBSX -unsigned int dbg_rw_mem(unsigned long gva, XEN_GUEST_HANDLE_PARAM(void) buf, - unsigned int len, domid_t domid, bool toaddr, - uint64_t pgd3); -#endif - -#endif /* __X86_DEBUGGER_H__ */ diff --git a/xen/include/asm-x86/debugreg.h b/xen/include/asm-x86/debugreg.h deleted file mode 100644 index c57914efc6..0000000000 --- a/xen/include/asm-x86/debugreg.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef _X86_DEBUGREG_H -#define _X86_DEBUGREG_H - - -/* Indicate the register numbers for a number of the specific - debug registers. Registers 0-3 contain the addresses we wish to trap on */ - -#define DR_FIRSTADDR 0 -#define DR_LASTADDR 3 -#define DR_STATUS 6 -#define DR_CONTROL 7 - -/* Define a few things for the status register. We can use this to determine - which debugging register was responsible for the trap. The other bits - are either reserved or not of interest to us. 
 */
-
-#define DR_TRAP0 (0x1) /* db0 */
-#define DR_TRAP1 (0x2) /* db1 */
-#define DR_TRAP2 (0x4) /* db2 */
-#define DR_TRAP3 (0x8) /* db3 */
-#define DR_STEP (0x4000) /* single-step */
-#define DR_SWITCH (0x8000) /* task switch */
-#define DR_NOT_RTM (0x10000) /* clear: #BP inside RTM region */
-#define DR_STATUS_RESERVED_ZERO (~0xffffeffful) /* Reserved, read as zero */
-#define DR_STATUS_RESERVED_ONE 0xffff0ff0ul /* Reserved, read as one */
-
-/* Now define a bunch of things for manipulating the control register.
-   The top two bytes of the control register consist of 4 fields of 4
-   bits - each field corresponds to one of the four debug registers,
-   and indicates what types of access we trap on, and how large the data
-   field is that we are looking at */
-
-#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
-#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
-
-#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
-#define DR_RW_WRITE (0x1)
-#define DR_IO (0x2)
-#define DR_RW_READ (0x3)
-
-#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
-#define DR_LEN_2 (0x4)
-#define DR_LEN_4 (0xC)
-#define DR_LEN_8 (0x8)
-
-/* The low byte of the control register determines which registers are
-   enabled. There are 4 fields of two bits. One bit is "local", meaning
-   that the processor will reset the bit after a task switch, and the other
-   is "global", meaning that we have to explicitly reset the bit. */
-
-#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
-#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
-
-#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
-#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
-
-#define DR7_ACTIVE_MASK (DR_LOCAL_ENABLE_MASK|DR_GLOBAL_ENABLE_MASK)
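
Putting the pieces above together, a value for %dr7 that arms one breakpoint slot can be composed as below. This is an illustrative, standalone sketch; the helper name and sample slot are invented, while the constants mirror the definitions above.

    #include <stdint.h>
    #include <stdio.h>

    #define DR_CONTROL_SHIFT 16
    #define DR_CONTROL_SIZE  4
    #define DR_ENABLE_SIZE   2
    #define DR_RW_WRITE      0x1
    #define DR_LEN_4         0xC

    /* Arm slot n (0-3) via its global-enable bit, trapping the given
     * access type and length. */
    static uint32_t dr7_for_slot(unsigned int n, uint32_t rw, uint32_t len)
    {
        uint32_t v = 0;

        v |= 2u << (n * DR_ENABLE_SIZE);   /* global enable for slot n */
        v |= (rw | len) << (DR_CONTROL_SHIFT + n * DR_CONTROL_SIZE);
        return v;
    }

    int main(void)
    {
        /* Prints 0xd0002: a 4-byte write watchpoint in slot 0. */
        printf("dr7 = %#x\n", dr7_for_slot(0, DR_RW_WRITE, DR_LEN_4));
        return 0;
    }

-
-/* The second byte of the control register has a few special things.
-   We can slow the instruction pipeline for instructions coming via the
-   gdt or the ldt if we want to.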
I am not sure why this is an advantage */ - -#define DR_CONTROL_RESERVED_ZERO (~0xffff27fful) /* Reserved, read as zero */ -#define DR_CONTROL_RESERVED_ONE (0x00000400ul) /* Reserved, read as one */ -#define DR_LOCAL_EXACT_ENABLE (0x00000100ul) /* Local exact enable */ -#define DR_GLOBAL_EXACT_ENABLE (0x00000200ul) /* Global exact enable */ -#define DR_RTM_ENABLE (0x00000800ul) /* RTM debugging enable */ -#define DR_GENERAL_DETECT (0x00002000ul) /* General detect enable */ - -#define write_debugreg(reg, val) do { \ - unsigned long __val = val; \ - asm volatile ( "mov %0,%%db" #reg : : "r" (__val) ); \ -} while (0) -#define read_debugreg(reg) ({ \ - unsigned long __val; \ - asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) ); \ - __val; \ -}) -long set_debugreg(struct vcpu *, unsigned int reg, unsigned long value); -void activate_debugregs(const struct vcpu *); - -#endif /* _X86_DEBUGREG_H */ diff --git a/xen/include/asm-x86/delay.h b/xen/include/asm-x86/delay.h deleted file mode 100644 index 9be2f46590..0000000000 --- a/xen/include/asm-x86/delay.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _X86_DELAY_H -#define _X86_DELAY_H - -/* - * Copyright (C) 1993 Linus Torvalds - * - * Delay routines calling functions in arch/i386/lib/delay.c - */ - -extern void __udelay(unsigned long usecs); -#define udelay(n) __udelay(n) - -#endif /* defined(_X86_DELAY_H) */ diff --git a/xen/include/asm-x86/desc.h b/xen/include/asm-x86/desc.h deleted file mode 100644 index 225a864c48..0000000000 --- a/xen/include/asm-x86/desc.h +++ /dev/null @@ -1,252 +0,0 @@ -#ifndef __ARCH_DESC_H -#define __ARCH_DESC_H - -#include - -/* - * Xen reserves a memory page of GDT entries. - * No guest GDT entries exist beyond the Xen reserved area. - */ -#define NR_RESERVED_GDT_PAGES 1 -#define NR_RESERVED_GDT_BYTES (NR_RESERVED_GDT_PAGES * PAGE_SIZE) -#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8) - -#define LAST_RESERVED_GDT_PAGE \ - (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1) -#define LAST_RESERVED_GDT_BYTE \ - (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1) -#define LAST_RESERVED_GDT_ENTRY \ - (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1) - -#define LDT_ENTRY_SIZE 8 - -#define FLAT_COMPAT_RING1_CS 0xe019 /* GDT index 259 */ -#define FLAT_COMPAT_RING1_DS 0xe021 /* GDT index 260 */ -#define FLAT_COMPAT_RING1_SS 0xe021 /* GDT index 260 */ -#define FLAT_COMPAT_RING3_CS 0xe02b /* GDT index 261 */ -#define FLAT_COMPAT_RING3_DS 0xe033 /* GDT index 262 */ -#define FLAT_COMPAT_RING3_SS 0xe033 /* GDT index 262 */ - -#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS -#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS -#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS -#define FLAT_COMPAT_USER_DS FLAT_COMPAT_RING3_DS -#define FLAT_COMPAT_USER_CS FLAT_COMPAT_RING3_CS -#define FLAT_COMPAT_USER_SS FLAT_COMPAT_RING3_SS - -#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8) -#define LDT_ENTRY (TSS_ENTRY + 2) -#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2) - -#define TSS_SELECTOR (TSS_ENTRY << 3) -#define LDT_SELECTOR (LDT_ENTRY << 3) -#define PER_CPU_SELECTOR (PER_CPU_GDT_ENTRY << 3) - -#ifndef __ASSEMBLY__ - -#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3) - -/* Fix up the RPL of a guest segment selector. */ -#define __fixup_guest_selector(d, sel) \ -({ \ - uint16_t _rpl = GUEST_KERNEL_RPL(d); \ - (sel) = (((sel) & 3) >= _rpl) ? 
(sel) : (((sel) & ~3) | _rpl); \
-})
-
-#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
-#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)
-
-/*
- * We need this function because enforcing the correct guest kernel RPL is
- * insufficient if the selector is poked into an interrupt, trap or call gate.
- * The selector RPL is ignored when a gate is accessed. We must therefore make
- * sure that the selector does not reference a Xen-private segment.
- *
- * Note that selectors used only by IRET do not need to be checked. If the
- * descriptor DPL differs from CS RPL then we'll #GP.
- *
- * Stack and data selectors do not need to be checked. If DS, ES, FS, GS are
- * DPL < CPL then they'll be cleared automatically. If SS RPL or DPL differs
- * from CS RPL then we'll #GP.
- */
-#define guest_gate_selector_okay(d, sel)                                \
-    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
-     ((sel) == (!is_pv_32bit_domain(d) ?                                \
-                FLAT_KERNEL_CS :                /* Xen default seg? */  \
-                FLAT_COMPAT_KERNEL_CS)) ||                              \
-     ((sel) & 4))                               /* LDT seg? */
-
-#endif /* __ASSEMBLY__ */
-
-/* These are bitmasks for the high 32 bits of a descriptor table entry. */
-#define _SEGMENT_TYPE (15<< 8)
-#define _SEGMENT_WR ( 1<< 9) /* Writeable (data) or Readable (code)
-                                segment */
-#define _SEGMENT_EC ( 1<<10) /* Expand-down or Conforming segment */
-#define _SEGMENT_CODE ( 1<<11) /* Code (vs data) segment for non-system
-                                  segments */
-#define _SEGMENT_S ( 1<<12) /* System descriptor (yes iff S==0) */
-#define _SEGMENT_DPL ( 3<<13) /* Descriptor Privilege Level */
-#define _SEGMENT_P ( 1<<15) /* Segment Present */
-#define _SEGMENT_L ( 1<<21) /* 64-bit segment */
-#define _SEGMENT_DB ( 1<<22) /* 16- or 32-bit segment */
-#define _SEGMENT_G ( 1<<23) /* Granularity */
-
-#ifndef __ASSEMBLY__
-
-/* System Descriptor types for GDT and IDT entries. */
-#define SYS_DESC_tss16_avail 1
-#define SYS_DESC_ldt 2
-#define SYS_DESC_tss16_busy 3
-#define SYS_DESC_call_gate16 4
-#define SYS_DESC_task_gate 5
-#define SYS_DESC_irq_gate16 6
-#define SYS_DESC_trap_gate16 7
-#define SYS_DESC_tss_avail 9
-#define SYS_DESC_tss_busy 11
-#define SYS_DESC_call_gate 12
-#define SYS_DESC_irq_gate 14
-#define SYS_DESC_trap_gate 15
-
-typedef union {
-    uint64_t raw;
-    struct {
-        uint32_t a, b;
-    };
-} seg_desc_t;
-
-typedef union {
-    struct {
-        uint64_t a, b;
-    };
-    struct {
-        uint16_t addr0;
-        uint16_t cs;
-        uint8_t ist; /* :3, 5 bits rsvd, but this yields far better code. */
-        uint8_t type:4, s:1, dpl:2, p:1;
-        uint16_t addr1;
-        uint32_t addr2;
-        /* 32 bits rsvd. */
-    };
-} idt_entry_t;
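
For experimentation, the idt_entry_t layout just defined can be copied into a self-contained program and poked at; the selector and type values below are arbitrary examples, not anything Xen actually installs.

    #include <stdint.h>
    #include <stdio.h>

    /* Reduced copy of the idt_entry_t union above, layout preserved. */
    typedef union {
        struct { uint64_t a, b; };
        struct {
            uint16_t addr0;
            uint16_t cs;
            uint8_t  ist;
            uint8_t  type:4, s:1, dpl:2, p:1;
            uint16_t addr1;
            uint32_t addr2;
        };
    } idt_entry_t;

    int main(void)
    {
        idt_entry_t e = {{ 0, 0 }};

        e.cs   = 0xe008;   /* arbitrary example code selector */
        e.type = 14;       /* SYS_DESC_irq_gate */
        e.p    = 1;        /* present */

        printf("low word: %#018llx\n", (unsigned long long)e.a);
        return 0;
    }

-
-/* Write the lower 64 bits of an IDT Entry. This relies on the upper 32
- * bits of the address not changing, which is a safe assumption as all
- * functions we are likely to load will live inside the 1GB
- * code/data/bss address range.
- *
- * Ideally, we would use cmpxchg16b, but this is not supported on some
- * old AMD 64bit capable processors, and has no safe equivalent.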
- */
-static inline void _write_gate_lower(volatile idt_entry_t *gate,
-                                     const idt_entry_t *new)
-{
-    ASSERT(gate->b == new->b);
-    gate->a = new->a;
-}
-
-#define _set_gate(gate_addr,type,dpl,addr)               \
-do {                                                     \
-    (gate_addr)->a = 0;                                  \
-    smp_wmb(); /* disable gate /then/ rewrite */         \
-    (gate_addr)->b =                                     \
-        ((unsigned long)(addr) >> 32);                   \
-    smp_wmb(); /* rewrite /then/ enable gate */          \
-    (gate_addr)->a =                                     \
-        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
-        ((unsigned long)(dpl) << 45) |                   \
-        ((unsigned long)(type) << 40) |                  \
-        ((unsigned long)(addr) & 0xFFFFUL) |             \
-        ((unsigned long)__HYPERVISOR_CS << 16) |         \
-        (1UL << 47);                                     \
-} while (0)
-
-static inline void _set_gate_lower(idt_entry_t *gate, unsigned long type,
-                                   unsigned long dpl, void *addr)
-{
-    idt_entry_t idte;
-    idte.b = gate->b;
-    idte.a =
-        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
-        ((unsigned long)(dpl) << 45) |
-        ((unsigned long)(type) << 40) |
-        ((unsigned long)(addr) & 0xFFFFUL) |
-        ((unsigned long)__HYPERVISOR_CS << 16) |
-        (1UL << 47);
-    _write_gate_lower(gate, &idte);
-}
-
-/* Update the lower half handler of an IDT Entry, without changing any
- * other configuration. */
-static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
-{
-    idt_entry_t idte;
-    idte.a = gate->a;
-
-    idte.b = ((unsigned long)(addr) >> 32);
-    idte.a &= 0x0000FFFFFFFF0000ULL;
-    idte.a |= (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
-              ((unsigned long)(addr) & 0xFFFFUL);
-
-    _write_gate_lower(gate, &idte);
-}
-
-#define _set_tssldt_desc(desc,addr,limit,type)           \
-do {                                                     \
-    (desc)[0].b = (desc)[1].b = 0;                       \
-    smp_wmb(); /* disable entry /then/ rewrite */        \
-    (desc)[0].a =                                        \
-        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
-    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
-    smp_wmb(); /* rewrite /then/ enable entry */         \
-    (desc)[0].b =                                        \
-        ((u32)(addr) & 0xFF000000U) |                    \
-        ((u32)(type) << 8) | 0x8000U |                   \
-        (((u32)(addr) & 0x00FF0000U) >> 16);             \
-} while (0)
-
-struct __packed desc_ptr {
-    unsigned short limit;
-    unsigned long base;
-};
-
-extern seg_desc_t boot_gdt[];
-DECLARE_PER_CPU(seg_desc_t *, gdt);
-DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
-extern seg_desc_t boot_compat_gdt[];
-DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
-DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
-DECLARE_PER_CPU(bool, full_gdt_loaded);
-
-static inline void lgdt(const struct desc_ptr *gdtr)
-{
-    __asm__ __volatile__ ( "lgdt %0" :: "m" (*gdtr) : "memory" );
-}
-
-static inline void lidt(const struct desc_ptr *idtr)
-{
-    __asm__ __volatile__ ( "lidt %0" :: "m" (*idtr) : "memory" );
-}
-
-static inline void lldt(unsigned int sel)
-{
-    __asm__ __volatile__ ( "lldt %w0" :: "rm" (sel) : "memory" );
-}
-
-static inline void ltr(unsigned int sel)
-{
-    __asm__ __volatile__ ( "ltr %w0" :: "rm" (sel) : "memory" );
-}
-
-static inline unsigned int str(void)
-{
-    unsigned int sel;
-
-    __asm__ ( "str %0" : "=r" (sel) );
-
-    return sel;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ARCH_DESC_H */
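
A closing note on _set_gate() and _set_tssldt_desc() above: both follow a disable-then-rewrite protocol so a CPU can never consume a half-updated descriptor. The standalone model below sketches that ordering under stated assumptions: the gate structure is a stand-in, and on x86 smp_wmb() amounts to a compiler barrier for ordinary stores.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for smp_wmb(): x86 stores are not reordered with stores,
     * so a compiler barrier suffices. */
    #define wmb() __asm__ __volatile__ ( "" ::: "memory" )

    struct gate { uint64_t a, b; };   /* word 'a' carries the Present bit */

    static void update_gate(volatile struct gate *g, uint64_t a, uint64_t b)
    {
        g->a = 0;   /* disable: racing CPUs see a not-present gate */
        wmb();      /* disable /then/ rewrite */
        g->b = b;   /* rewrite the other half while disabled */
        wmb();      /* rewrite /then/ enable */
        g->a = a;   /* re-enable with the new contents */
    }

    int main(void)
    {
        static struct gate g;

        update_gate(&g, 0x8e0000100000ull, 0xffff8300ull); /* arbitrary */
        printf("a=%#llx b=%#llx\n",
               (unsigned long long)g.a, (unsigned long long)g.b);
        return 0;
    }

diff --git a/xen/include/asm-x86/device.h b/xen/include/asm-x86/device.h
deleted file mode 100644
index f2acc7effd..0000000000
--- a/xen/include/asm-x86/device.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __ASM_X86_DEVICE_H
-#define __ASM_X86_DEVICE_H
-
-#include
-
-/*
- * x86 only supports PCI. Therefore it's possible to directly use
- * pci_dev to avoid adding a new field.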
- */ - -typedef struct pci_dev device_t; - -#define dev_is_pci(dev) ((void)(dev), 1) -#define pci_to_dev(pci) (pci) - -#endif /* __ASM_X86_DEVICE_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/div64.h b/xen/include/asm-x86/div64.h deleted file mode 100644 index dd49f64a3b..0000000000 --- a/xen/include/asm-x86/div64.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __X86_DIV64 -#define __X86_DIV64 - -#include - -#define do_div(n,base) ({ \ - uint32_t __base = (base); \ - uint32_t __rem; \ - __rem = ((uint64_t)(n)) % __base; \ - (n) = ((uint64_t)(n)) / __base; \ - __rem; \ -}) - -#endif diff --git a/xen/include/asm-x86/dom0_build.h b/xen/include/asm-x86/dom0_build.h deleted file mode 100644 index a5f8c9e67f..0000000000 --- a/xen/include/asm-x86/dom0_build.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _DOM0_BUILD_H_ -#define _DOM0_BUILD_H_ - -#include -#include - -#include - -extern unsigned int dom0_memflags; - -unsigned long dom0_compute_nr_pages(struct domain *d, - struct elf_dom_parms *parms, - unsigned long initrd_len); -int dom0_setup_permissions(struct domain *d); - -int dom0_construct_pv(struct domain *d, const module_t *image, - unsigned long image_headroom, - module_t *initrd, - char *cmdline); - -int dom0_construct_pvh(struct domain *d, const module_t *image, - unsigned long image_headroom, - module_t *initrd, - char *cmdline); - -unsigned long dom0_paging_pages(const struct domain *d, - unsigned long nr_pages); - -void dom0_update_physmap(bool compat, unsigned long pfn, - unsigned long mfn, unsigned long vphysmap_s); - -#endif /* _DOM0_BUILD_H_ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h deleted file mode 100644 index 92d54de0b9..0000000000 --- a/xen/include/asm-x86/domain.h +++ /dev/null @@ -1,769 +0,0 @@ -#ifndef __ASM_DOMAIN_H__ -#define __ASM_DOMAIN_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo) - -#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ - (d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector) -#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain)) -#define is_domain_direct_mapped(d) ((void)(d), 0) - -#define VCPU_TRAP_NONE 0 -#define VCPU_TRAP_NMI 1 -#define VCPU_TRAP_MCE 2 -#define VCPU_TRAP_LAST VCPU_TRAP_MCE - -#define nmi_state async_exception_state(VCPU_TRAP_NMI) -#define mce_state async_exception_state(VCPU_TRAP_MCE) - -#define nmi_pending nmi_state.pending -#define mce_pending mce_state.pending - -struct trap_bounce { - uint32_t error_code; - uint8_t flags; /* TBF_ */ - uint16_t cs; - unsigned long eip; -}; - -#define MAPHASH_ENTRIES 8 -#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1)) -#define MAPHASHENT_NOTINUSE ((u32)~0U) -struct mapcache_vcpu { - /* Shadow of mapcache_domain.epoch. */ - unsigned int shadow_epoch; - - /* Lock-free per-VCPU hash of recently-used mappings. */ - struct vcpu_maphash_entry { - unsigned long mfn; - uint32_t idx; - uint32_t refcnt; - } hash[MAPHASH_ENTRIES]; -}; - -struct mapcache_domain { - /* The number of array entries, and a cursor into the array. */ - unsigned int entries; - unsigned int cursor; - - /* Protects map_domain_page(). 
*/ - spinlock_t lock; - - /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */ - unsigned int epoch; - u32 tlbflush_timestamp; - - /* Which mappings are in use, and which are garbage to reap next epoch? */ - unsigned long *inuse; - unsigned long *garbage; -}; - -int mapcache_domain_init(struct domain *); -int mapcache_vcpu_init(struct vcpu *); -void mapcache_override_current(struct vcpu *); - -/* x86/64: toggle guest between kernel and user modes. */ -void toggle_guest_mode(struct vcpu *); -/* x86/64: toggle guest page tables between kernel and user modes. */ -void toggle_guest_pt(struct vcpu *); - -void cpuid_policy_updated(struct vcpu *v); - -/* - * Initialise a hypercall-transfer page. The given pointer must be mapped - * in Xen virtual address space (accesses are not validated or checked). - */ -void init_hypercall_page(struct domain *d, void *); - -/************************************************/ -/* shadow paging extension */ -/************************************************/ -struct shadow_domain { -#ifdef CONFIG_SHADOW_PAGING - unsigned int opt_flags; /* runtime tunable optimizations on/off */ - struct page_list_head pinned_shadows; - - /* Memory allocation */ - struct page_list_head freelist; - unsigned int total_pages; /* number of pages allocated */ - unsigned int free_pages; /* number of pages on freelists */ - unsigned int p2m_pages; /* number of pages allocated to p2m */ - - /* 1-to-1 map for use when HVM vcpus have paging disabled */ - pagetable_t unpaged_pagetable; - - /* reflect guest table dirty status, incremented by write - * emulation and remove write permission */ - atomic_t gtable_dirty_version; - - /* Shadow hashtable */ - struct page_info **hash_table; - bool_t hash_walking; /* Some function is walking the hash table */ - - /* Fast MMIO path heuristic */ - bool has_fast_mmio_entries; - - /* OOS */ - bool_t oos_active; - -#ifdef CONFIG_HVM - /* Has this domain ever used HVMOP_pagetable_dying? */ - bool_t pagetable_dying_op; -#endif - -#ifdef CONFIG_PV - /* PV L1 Terminal Fault mitigation. */ - struct tasklet pv_l1tf_tasklet; -#endif /* CONFIG_PV */ -#endif -}; - -struct shadow_vcpu { -#ifdef CONFIG_SHADOW_PAGING -#ifdef CONFIG_HVM - /* PAE guests: per-vcpu shadow top-level table */ - l3_pgentry_t l3table[4] __attribute__((__aligned__(32))); - /* PAE guests: per-vcpu cache of the top-level *guest* entries */ - l3_pgentry_t gl3e[4] __attribute__((__aligned__(32))); - - /* shadow(s) of guest (MFN) */ - pagetable_t shadow_table[4]; -#else - /* shadow of guest (MFN) */ - pagetable_t shadow_table[1]; -#endif - - /* Last MFN that we emulated a write to as unshadow heuristics. */ - unsigned long last_emulated_mfn_for_unshadow; - /* MFN of the last shadow that we shot a writeable mapping in */ - unsigned long last_writeable_pte_smfn; -#ifdef CONFIG_HVM - /* Last frame number that we emulated a write to. 
*/ - unsigned long last_emulated_frame; - /* Last MFN that we emulated a write successfully */ - unsigned long last_emulated_mfn; -#endif - - /* Shadow out-of-sync: pages that this vcpu has let go out of sync */ - mfn_t oos[SHADOW_OOS_PAGES]; - mfn_t oos_snapshot[SHADOW_OOS_PAGES]; - struct oos_fixup { - int next; - mfn_t smfn[SHADOW_OOS_FIXUPS]; - unsigned long off[SHADOW_OOS_FIXUPS]; - } oos_fixup[SHADOW_OOS_PAGES]; - -#ifdef CONFIG_HVM - bool_t pagetable_dying; -#endif -#endif -}; - -/************************************************/ -/* hardware assisted paging */ -/************************************************/ -struct hap_domain { - struct page_list_head freelist; - unsigned int total_pages; /* number of pages allocated */ - unsigned int free_pages; /* number of pages on freelists */ - unsigned int p2m_pages; /* number of pages allocated to p2m */ -}; - -/************************************************/ -/* common paging data structure */ -/************************************************/ -struct log_dirty_domain { - /* log-dirty radix tree to record dirty pages */ - mfn_t top; - unsigned int allocs; - unsigned int failed_allocs; - - /* log-dirty mode stats */ - unsigned long fault_count; - unsigned long dirty_count; - - /* functions which are paging mode specific */ - const struct log_dirty_ops { - int (*enable )(struct domain *d, bool log_global); - int (*disable )(struct domain *d); - void (*clean )(struct domain *d); - } *ops; -}; - -struct paging_domain { - /* paging lock */ - mm_lock_t lock; - - /* flags to control paging operation */ - u32 mode; - /* Has that pool ever run out of memory? */ - bool_t p2m_alloc_failed; - /* extension for shadow paging support */ - struct shadow_domain shadow; - /* extension for hardware-assited paging */ - struct hap_domain hap; - /* log dirty support */ - struct log_dirty_domain log_dirty; - - /* preemption handling */ - struct { - const struct domain *dom; - unsigned int op; - union { - struct { - unsigned long done:PADDR_BITS - PAGE_SHIFT; - unsigned long i4:PAGETABLE_ORDER; - unsigned long i3:PAGETABLE_ORDER; - } log_dirty; - }; - } preempt; - - /* alloc/free pages from the pool for paging-assistance structures - * (used by p2m and log-dirty code for their tries) */ - struct page_info * (*alloc_page)(struct domain *d); - void (*free_page)(struct domain *d, struct page_info *pg); -}; - -struct paging_vcpu { - /* Pointers to mode-specific entry points. */ - const struct paging_mode *mode; - /* Nested Virtualization: paging mode of nested guest */ - const struct paging_mode *nestedmode; -#ifdef CONFIG_HVM - /* HVM guest: last emulate was to a pagetable */ - unsigned int last_write_was_pt:1; - /* HVM guest: last write emulation succeeds */ - unsigned int last_write_emul_ok:1; -#endif - /* Translated guest: virtual TLB */ - struct shadow_vtlb *vtlb; - spinlock_t vtlb_lock; - - /* paging support extension */ - struct shadow_vcpu shadow; -}; - -#define MAX_NESTEDP2M 10 - -#define MAX_ALTP2M 10 /* arbitrary */ -#define INVALID_ALTP2M 0xffff -#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t)) -struct p2m_domain; -struct time_scale { - int shift; - u32 mul_frac; -}; - -struct pv_domain -{ - l1_pgentry_t **gdt_ldt_l1tab; - - atomic_t nr_l4_pages; - - /* Is a 32-bit PV guest? */ - bool is_32bit; - /* XPTI active? */ - bool xpti; - /* Use PCID feature? */ - bool pcid; - /* Mitigate L1TF with shadow/crashing? */ - bool check_l1tf; - - /* map_domain_page() mapping cache. 
*/ - struct mapcache_domain mapcache; - - struct cpuidmasks *cpuidmasks; -}; - -struct monitor_write_data { - struct { - unsigned int msr : 1; - unsigned int cr0 : 1; - unsigned int cr3 : 1; - unsigned int cr4 : 1; - } do_write; - - bool cr3_noflush; - - uint32_t msr; - uint64_t value; - uint64_t cr0; - uint64_t cr3; - uint64_t cr4; -}; - -struct arch_domain -{ - struct page_info *perdomain_l3_pg; - -#ifdef CONFIG_PV32 - unsigned int hv_compat_vstart; -#endif - - /* Maximum physical-address bitwidth supported by this guest. */ - unsigned int physaddr_bitsize; - - /* I/O-port admin-specified access capabilities. */ - struct rangeset *ioport_caps; - uint32_t pci_cf8; - uint8_t cmos_idx; - - union { - struct pv_domain pv; - struct hvm_domain hvm; - }; - - struct paging_domain paging; - struct p2m_domain *p2m; - /* To enforce lock ordering in the pod code wrt the - * page_alloc lock */ - int page_alloc_unlock_level; - - /* Continuable domain_relinquish_resources(). */ - unsigned int rel_priv; - struct page_list_head relmem_list; - - const struct arch_csw { - void (*from)(struct vcpu *); - void (*to)(struct vcpu *); - void noreturn (*tail)(void); - } *ctxt_switch; - -#ifdef CONFIG_HVM - /* nestedhvm: translate l2 guest physical to host physical */ - struct p2m_domain *nested_p2m[MAX_NESTEDP2M]; - mm_lock_t nested_p2m_lock; - - /* altp2m: allow multiple copies of host p2m */ - bool_t altp2m_active; - struct p2m_domain *altp2m_p2m[MAX_ALTP2M]; - mm_lock_t altp2m_list_lock; - uint64_t *altp2m_eptp; - uint64_t *altp2m_visible_eptp; -#endif - - /* NB. protected by d->event_lock and by irq_desc[irq].lock */ - struct radix_tree_root irq_pirq; - - /* Is shared-info page in 32-bit format? */ - bool_t has_32bit_shinfo; - - /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */ - bool_t auto_unmask; - - /* - * The width of the FIP/FDP register in the FPU that needs to be - * saved/restored during a context switch. This is needed because - * the FPU can either: a) restore the 64-bit FIP/FDP and clear FCS - * and FDS; or b) restore the 32-bit FIP/FDP (clearing the upper - * 32-bits of FIP/FDP) and restore FCS/FDS. - * - * Which one is needed depends on the guest. - * - * This can be either: 8, 4 or 0. 0 means auto-detect the size - * based on the width of FIP/FDP values that are written by the - * guest. - */ - uint8_t x87_fip_width; - - /* CPUID and MSR policy objects. */ - struct cpuid_policy *cpuid; - struct msr_policy *msr; - - struct PITState vpit; - - /* TSC management (emulation, pv, scaling, stats) */ - int tsc_mode; /* see include/asm-x86/time.h */ - bool_t vtsc; /* tsc is emulated (may change after migrate) */ - s_time_t vtsc_last; /* previous TSC value (guarantee monotonicity) */ - uint64_t vtsc_offset; /* adjustment for save/restore/migrate */ - uint32_t tsc_khz; /* cached guest khz for certain emulated or - hardware TSC scaling cases */ - struct time_scale vtsc_to_ns; /* scaling for certain emulated or - hardware TSC scaling cases */ - struct time_scale ns_to_vtsc; /* scaling for certain emulated or - hardware TSC scaling cases */ - uint32_t incarnation; /* incremented every restore or live migrate - (possibly other cases in the future */ - - /* Pseudophysical e820 map (XENMEM_memory_map). */ - spinlock_t e820_lock; - struct e820entry *e820; - unsigned int nr_e820; - - /* RMID assigned to the domain for CMT */ - unsigned int psr_rmid; - /* COS assigned to the domain for each socket */ - unsigned int *psr_cos_ids; - - /* Shared page for notifying that explicit PIRQ EOI is required. 
*/ - unsigned long *pirq_eoi_map; - unsigned long pirq_eoi_map_mfn; - - /* Arch-specific monitor options */ - struct { - unsigned int write_ctrlreg_enabled : 4; - unsigned int write_ctrlreg_sync : 4; - unsigned int write_ctrlreg_onchangeonly : 4; - unsigned int singlestep_enabled : 1; - unsigned int software_breakpoint_enabled : 1; - unsigned int debug_exception_enabled : 1; - unsigned int debug_exception_sync : 1; - unsigned int cpuid_enabled : 1; - unsigned int descriptor_access_enabled : 1; - unsigned int guest_request_userspace_enabled : 1; - unsigned int emul_unimplemented_enabled : 1; - /* - * By default all events are sent. - * This is used to filter out pagefaults. - */ - unsigned int inguest_pagefault_disabled : 1; - unsigned int control_register_values : 1; - struct monitor_msr_bitmap *msr_bitmap; - uint64_t write_ctrlreg_mask[4]; - } monitor; - - /* Mem_access emulation control */ - bool_t mem_access_emulate_each_rep; - - /* Don't unconditionally inject #GP for unhandled MSRs. */ - bool msr_relaxed; - - /* Emulated devices enabled bitmap. */ - uint32_t emulation_flags; -} __cacheline_aligned; - -#ifdef CONFIG_HVM -#define X86_EMU_LAPIC XEN_X86_EMU_LAPIC -#define X86_EMU_HPET XEN_X86_EMU_HPET -#define X86_EMU_PM XEN_X86_EMU_PM -#define X86_EMU_RTC XEN_X86_EMU_RTC -#define X86_EMU_IOAPIC XEN_X86_EMU_IOAPIC -#define X86_EMU_PIC XEN_X86_EMU_PIC -#define X86_EMU_VGA XEN_X86_EMU_VGA -#define X86_EMU_IOMMU XEN_X86_EMU_IOMMU -#define X86_EMU_USE_PIRQ XEN_X86_EMU_USE_PIRQ -#define X86_EMU_VPCI XEN_X86_EMU_VPCI -#else -#define X86_EMU_LAPIC 0 -#define X86_EMU_HPET 0 -#define X86_EMU_PM 0 -#define X86_EMU_RTC 0 -#define X86_EMU_IOAPIC 0 -#define X86_EMU_PIC 0 -#define X86_EMU_VGA 0 -#define X86_EMU_IOMMU 0 -#define X86_EMU_USE_PIRQ 0 -#define X86_EMU_VPCI 0 -#endif - -#define X86_EMU_PIT XEN_X86_EMU_PIT - -/* This must match XEN_X86_EMU_ALL in xen.h */ -#define X86_EMU_ALL (X86_EMU_LAPIC | X86_EMU_HPET | \ - X86_EMU_PM | X86_EMU_RTC | \ - X86_EMU_IOAPIC | X86_EMU_PIC | \ - X86_EMU_VGA | X86_EMU_IOMMU | \ - X86_EMU_PIT | X86_EMU_USE_PIRQ | \ - X86_EMU_VPCI) - -#define has_vlapic(d) (!!((d)->arch.emulation_flags & X86_EMU_LAPIC)) -#define has_vhpet(d) (!!((d)->arch.emulation_flags & X86_EMU_HPET)) -#define has_vpm(d) (!!((d)->arch.emulation_flags & X86_EMU_PM)) -#define has_vrtc(d) (!!((d)->arch.emulation_flags & X86_EMU_RTC)) -#define has_vioapic(d) (!!((d)->arch.emulation_flags & X86_EMU_IOAPIC)) -#define has_vpic(d) (!!((d)->arch.emulation_flags & X86_EMU_PIC)) -#define has_vvga(d) (!!((d)->arch.emulation_flags & X86_EMU_VGA)) -#define has_viommu(d) (!!((d)->arch.emulation_flags & X86_EMU_IOMMU)) -#define has_vpit(d) (!!((d)->arch.emulation_flags & X86_EMU_PIT)) -#define has_pirq(d) (!!((d)->arch.emulation_flags & X86_EMU_USE_PIRQ)) -#define has_vpci(d) (!!((d)->arch.emulation_flags & X86_EMU_VPCI)) - -#define gdt_ldt_pt_idx(v) \ - ((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT)) -#define pv_gdt_ptes(v) \ - ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \ - (((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))) -#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16) - -struct pv_vcpu -{ - /* map_domain_page() mapping cache. 
*/ - struct mapcache_vcpu mapcache; - - unsigned int vgc_flags; - - struct trap_info *trap_ctxt; - - unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE]; - unsigned long ldt_base; - unsigned int gdt_ents, ldt_ents; - - unsigned long kernel_ss, kernel_sp; - unsigned long ctrlreg[8]; - - unsigned long event_callback_eip; - unsigned long failsafe_callback_eip; - union { - unsigned long syscall_callback_eip; - struct { - unsigned int event_callback_cs; - unsigned int failsafe_callback_cs; - }; - }; - - unsigned long syscall32_callback_eip; - unsigned long sysenter_callback_eip; - unsigned short syscall32_callback_cs; - unsigned short sysenter_callback_cs; - bool_t syscall32_disables_events; - bool_t sysenter_disables_events; - - /* - * 64bit segment bases. - * - * FS and the active GS are always stale when the vCPU is in context, as - * the guest can change them behind Xen's back with MOV SREG, or - * WR{FS,GS}BASE on capable hardware. - * - * The inactive GS base is never stale, as guests can't use SWAPGS to - * access it - all modification is performed by Xen either directly - * (hypercall, #GP emulation), or indirectly (toggle_guest_mode()). - * - * The vCPU context switch path is optimised based on this fact, so any - * path updating or swapping the inactive base must update the cached - * value as well. - * - * Which GS base is active and inactive depends on whether the vCPU is in - * user or kernel context. - */ - unsigned long fs_base; - unsigned long gs_base_kernel; - unsigned long gs_base_user; - - /* Bounce information for propagating an exception to guest OS. */ - struct trap_bounce trap_bounce; - - /* I/O-port access bitmap. */ - XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */ - unsigned int iobmp_limit; /* Number of ports represented in the bitmap. */ -#define IOPL(val) MASK_INSR(val, X86_EFLAGS_IOPL) - unsigned int iopl; /* Current IOPL for this VCPU, shifted left by - * 12 to match the eflags register. */ - - /* - * %dr7 bits the guest has set, but aren't loaded into hardware, and are - * completely emulated. - */ - uint32_t dr7_emul; - - /* Deferred VA-based update state. */ - bool_t need_update_runstate_area; - struct vcpu_time_info pending_system_time; -}; - -struct arch_vcpu -{ - /* - * guest context (mirroring struct vcpu_guest_context) common - * between pv and hvm guests - */ - - void *fpu_ctxt; - struct cpu_user_regs user_regs; - - /* Debug registers. */ - unsigned long dr[4]; - unsigned long dr7; /* Ideally int, but __vmread() needs long. */ - unsigned int dr6; - - /* other state */ - - unsigned long flags; /* TF_ */ - - struct vpmu_struct vpmu; - - struct { - bool pending; - uint8_t old_mask; - } async_exception_state[VCPU_TRAP_LAST]; -#define async_exception_state(t) async_exception_state[(t)-1] - uint8_t async_exception_mask; - - /* Virtual Machine Extensions */ - union { - struct pv_vcpu pv; - struct hvm_vcpu hvm; - }; - - /* - * guest_table{,_user} hold a ref to the page, and also a type-count - * unless shadow refcounts are in use - */ - pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */ - pagetable_t guest_table; /* (MFN) guest notion of cr3 */ - struct page_info *old_guest_table; /* partially destructed pagetable */ - struct page_info *old_guest_ptpg; /* containing page table of the */ - /* former, if any */ - bool old_guest_table_partial; /* Are we dropping a type ref, or just - * finishing up a partial de-validation? 
*/ - - unsigned long cr3; /* (MA) value to install in HW CR3 */ - - /* - * The save area for Processor Extended States and the bitmask of the - * XSAVE/XRSTOR features. They are used by: 1) when a vcpu (which has - * dirtied FPU/SSE) is scheduled out we XSAVE the states here; 2) in - * #NM handler, we XRSTOR the states we XSAVE-ed; - */ - struct xsave_struct *xsave_area; - uint64_t xcr0; - /* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen - * itself, as we can never know whether guest OS depends on content - * preservation whenever guest OS clears one feature flag (for example, - * temporarily). - * However, processor should not be able to touch eXtended states before - * it explicitly enables it via xcr0. - */ - uint64_t xcr0_accum; - /* This variable determines whether nonlazy extended state has been used, - * and thus should be saved/restored. */ - bool_t nonlazy_xstate_used; - - /* Restore all FPU state (lazy and non-lazy state) on context switch? */ - bool fully_eager_fpu; - - struct vmce vmce; - - struct paging_vcpu paging; - - uint32_t gdbsx_vcpu_event; - - /* A secondary copy of the vcpu time info. */ - XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest; - - struct arch_vm_event *vm_event; - - struct vcpu_msrs *msrs; - - struct { - bool next_interrupt_enabled; - } monitor; -}; - -struct guest_memory_policy -{ - bool nested_guest_mode; -}; - -void update_guest_memory_policy(struct vcpu *v, - struct guest_memory_policy *policy); - -void domain_cpu_policy_changed(struct domain *d); - -bool update_runstate_area(struct vcpu *); -bool update_secondary_system_time(struct vcpu *, - struct vcpu_time_info *); - -void vcpu_show_execution_state(struct vcpu *); -void vcpu_show_registers(const struct vcpu *); - -static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void) -{ - return vmalloc(sizeof(struct vcpu_guest_context)); -} - -static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc) -{ - vfree(vgc); -} - -void arch_vcpu_regs_init(struct vcpu *v); - -struct vcpu_hvm_context; -int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx); - -#ifdef CONFIG_PV -void pv_inject_event(const struct x86_event *event); -#else -static inline void pv_inject_event(const struct x86_event *event) -{ - ASSERT_UNREACHABLE(); -} -#endif - -static inline void pv_inject_hw_exception(unsigned int vector, int errcode) -{ - const struct x86_event event = { - .vector = vector, - .type = X86_EVENTTYPE_HW_EXCEPTION, - .error_code = errcode, - }; - - pv_inject_event(&event); -} - -static inline void pv_inject_page_fault(int errcode, unsigned long cr2) -{ - const struct x86_event event = { - .vector = TRAP_page_fault, - .type = X86_EVENTTYPE_HW_EXCEPTION, - .error_code = errcode, - .cr2 = cr2, - }; - - pv_inject_event(&event); -} - -static inline void pv_inject_sw_interrupt(unsigned int vector) -{ - const struct x86_event event = { - .vector = vector, - .type = X86_EVENTTYPE_SW_INTERRUPT, - .error_code = X86_EVENT_NO_EC, - }; - - pv_inject_event(&event); -} - -#define PV32_VM_ASSIST_MASK ((1UL << VMASST_TYPE_4gb_segments) | \ - (1UL << VMASST_TYPE_4gb_segments_notify) | \ - (1UL << VMASST_TYPE_writable_pagetables) | \ - (1UL << VMASST_TYPE_pae_extended_cr3) | \ - (1UL << VMASST_TYPE_architectural_iopl) | \ - (1UL << VMASST_TYPE_runstate_update_flag)) -/* - * Various of what PV32_VM_ASSIST_MASK has isn't really applicable to 64-bit, - * but we can't make such requests fail all of the sudden. 
- */ -#define PV64_VM_ASSIST_MASK (PV32_VM_ASSIST_MASK | \ - (1UL << VMASST_TYPE_m2p_strict)) -#define HVM_VM_ASSIST_MASK (1UL << VMASST_TYPE_runstate_update_flag) - -#define arch_vm_assist_valid_mask(d) \ - (is_hvm_domain(d) ? HVM_VM_ASSIST_MASK \ - : is_pv_32bit_domain(d) ? PV32_VM_ASSIST_MASK \ - : PV64_VM_ASSIST_MASK) - -#endif /* __ASM_DOMAIN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/e820.h b/xen/include/asm-x86/e820.h deleted file mode 100644 index 9d8f1ba960..0000000000 --- a/xen/include/asm-x86/e820.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __E820_HEADER -#define __E820_HEADER - -/* - * PC BIOS standard E820 types and structure. - */ -#define E820_RAM 1 -#define E820_RESERVED 2 -#define E820_ACPI 3 -#define E820_NVS 4 -#define E820_UNUSABLE 5 - -struct __packed e820entry { - uint64_t addr; - uint64_t size; - uint32_t type; -}; - -#define E820MAX 1024 - -struct e820map { - unsigned int nr_map; - struct e820entry map[E820MAX]; -}; - -extern int sanitize_e820_map(struct e820entry *biosmap, unsigned int *pnr_map); -extern int e820_all_mapped(u64 start, u64 end, unsigned type); -extern int reserve_e820_ram(struct e820map *e820, uint64_t s, uint64_t e); -extern int e820_change_range_type( - struct e820map *e820, uint64_t s, uint64_t e, - uint32_t orig_type, uint32_t new_type); -extern int e820_add_range( - struct e820map *, uint64_t s, uint64_t e, uint32_t type); -extern unsigned long init_e820(const char *, struct e820map *); -extern struct e820map e820; -extern struct e820map e820_raw; - -/* These symbols live in the boot trampoline. */ -extern struct e820map bios_e820map[]; -extern unsigned int bios_e820nr; - -#endif /*__E820_HEADER*/ diff --git a/xen/include/asm-x86/edd.h b/xen/include/asm-x86/edd.h deleted file mode 100644 index afaa23732a..0000000000 --- a/xen/include/asm-x86/edd.h +++ /dev/null @@ -1,164 +0,0 @@ -/****************************************************************************** - * edd.h - * - * Copyright (C) 2002, 2003, 2004 Dell Inc. - * by Matt Domsch - * - * structures and definitions for the int 13h, ax={41,48}h - * BIOS Enhanced Disk Drive Services - * This is based on the T13 group document D1572 Revision 0 (August 14 2002) - * available at http://www.t13.org/docs2002/d1572r0.pdf. It is - * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License v2.0 as published by - * the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __XEN_EDD_H__ -#define __XEN_EDD_H__ - -#ifndef __ASSEMBLY__ - -struct __packed edd_info { - /* Int13, Fn48: Check Extensions Present. */ - u8 device; /* %dl: device */ - u8 version; /* %ah: major version */ - u16 interface_support; /* %cx: interface support bitmap */ - /* Int13, Fn08: Legacy Get Device Parameters. */ - u16 legacy_max_cylinder; /* %cl[7:6]:%ch: maximum cylinder number */ - u8 legacy_max_head; /* %dh: maximum head number */ - u8 legacy_sectors_per_track; /* %cl[5:0]: maximum sector number */ - /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). 
*/ - struct __packed edd_device_params { - u16 length; - u16 info_flags; - u32 num_default_cylinders; - u32 num_default_heads; - u32 sectors_per_track; - u64 number_of_sectors; - u16 bytes_per_sector; - u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ - u16 key; /* = 0xBEDD */ - u8 device_path_info_length; - u8 reserved2; - u16 reserved3; - u8 host_bus_type[4]; - u8 interface_type[8]; - union { - struct __packed { - u16 base_address; - u16 reserved1; - u32 reserved2; - } isa; - struct __packed { - u8 bus; - u8 slot; - u8 function; - u8 channel; - u32 reserved; - } pci; - /* pcix is same as pci */ - struct __packed { - u64 reserved; - } ibnd; - struct __packed { - u64 reserved; - } xprs; - struct __packed { - u64 reserved; - } htpt; - struct __packed { - u64 reserved; - } unknown; - } interface_path; - union { - struct __packed { - u8 device; - u8 reserved1; - u16 reserved2; - u32 reserved3; - u64 reserved4; - } ata; - struct __packed { - u8 device; - u8 lun; - u8 reserved1; - u8 reserved2; - u32 reserved3; - u64 reserved4; - } atapi; - struct __packed { - u16 id; - u64 lun; - u16 reserved1; - u32 reserved2; - } scsi; - struct __packed { - u64 serial_number; - u64 reserved; - } usb; - struct __packed { - u64 eui; - u64 reserved; - } i1394; - struct __packed { - u64 wwid; - u64 lun; - } fibre; - struct __packed { - u64 identity_tag; - u64 reserved; - } i2o; - struct __packed { - u32 array_number; - u32 reserved1; - u64 reserved2; - } raid; - struct __packed { - u8 device; - u8 reserved1; - u16 reserved2; - u32 reserved3; - u64 reserved4; - } sata; - struct __packed { - u64 reserved1; - u64 reserved2; - } unknown; - } device_path; - u8 reserved4; - u8 checksum; - } edd_device_params; -}; - -struct __packed mbr_signature { - u8 device; - u8 pad[3]; - u32 signature; -}; - -/* These all reside in the boot trampoline. Access via bootsym(). */ -extern struct mbr_signature boot_mbr_signature[]; -extern u8 boot_mbr_signature_nr; -extern struct edd_info boot_edd_info[]; -extern u8 boot_edd_info_nr; - -#endif /* __ASSEMBLY__ */ - -/* Maximum number of EDD information structures at boot_edd_info. */ -#define EDD_INFO_MAX 6 - -/* Maximum number of MBR signatures at boot_mbr_signature. */ -#define EDD_MBR_SIG_MAX 16 - -/* Size of components of EDD information structure. */ -#define EDDEXTSIZE 8 -#define EDDPARMSIZE 74 - -#endif /* __XEN_EDD_H__ */ diff --git a/xen/include/asm-x86/efibind.h b/xen/include/asm-x86/efibind.h deleted file mode 100644 index bce02f3707..0000000000 --- a/xen/include/asm-x86/efibind.h +++ /dev/null @@ -1,2 +0,0 @@ -#include -#include diff --git a/xen/include/asm-x86/elf.h b/xen/include/asm-x86/elf.h deleted file mode 100644 index 1d7ea96e22..0000000000 --- a/xen/include/asm-x86/elf.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __X86_ELF_H__ -#define __X86_ELF_H__ - -typedef struct { - unsigned long cr0, cr2, cr3, cr4; -} crash_xen_core_t; - -#include - -#endif /* __X86_ELF_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h deleted file mode 100644 index 5e09ede6d7..0000000000 --- a/xen/include/asm-x86/event.h +++ /dev/null @@ -1,56 +0,0 @@ -/****************************************************************************** - * event.h - * - * A nice interface for passing asynchronous events to guest OSes. 
- * (architecture-dependent part) - * - */ - -#ifndef __ASM_EVENT_H__ -#define __ASM_EVENT_H__ - -#include - -void vcpu_kick(struct vcpu *v); -void vcpu_mark_events_pending(struct vcpu *v); - -static inline int vcpu_event_delivery_is_enabled(struct vcpu *v) -{ - return !vcpu_info(v, evtchn_upcall_mask); -} - -int hvm_local_events_need_delivery(struct vcpu *v); -static always_inline bool local_events_need_delivery(void) -{ - struct vcpu *v = current; - - ASSERT(!is_idle_vcpu(v)); - - return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) : - (vcpu_info(v, evtchn_upcall_pending) && - !vcpu_info(v, evtchn_upcall_mask))); -} - -static inline void local_event_delivery_disable(void) -{ - vcpu_info(current, evtchn_upcall_mask) = 1; -} - -static inline void local_event_delivery_enable(void) -{ - vcpu_info(current, evtchn_upcall_mask) = 0; -} - -/* No arch specific virq definition now. Default to global. */ -static inline bool arch_virq_is_global(unsigned int virq) -{ - return true; -} - -#ifdef CONFIG_PV_SHIM -# include -# define arch_evtchn_is_special(chn) \ - (pv_shim && (chn)->port && (chn)->state == ECS_RESERVED) -#endif - -#endif diff --git a/xen/include/asm-x86/fixmap.h b/xen/include/asm-x86/fixmap.h deleted file mode 100644 index 20746afd0a..0000000000 --- a/xen/include/asm-x86/fixmap.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * fixmap.h: compile-time virtual memory allocation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998 Ingo Molnar - * Modifications for Xen are copyright (c) 2002-2004, K A Fraser - */ - -#ifndef _ASM_FIXMAP_H -#define _ASM_FIXMAP_H - -#include - -#define FIXADDR_TOP (VMAP_VIRT_END - PAGE_SIZE) -#define FIXADDR_X_TOP (XEN_VIRT_END - PAGE_SIZE) - -#ifndef __ASSEMBLY__ - -#include -#include -#include -#include -#include - -/* - * Here we define all the compile-time 'special' virtual - * addresses. The point is to have a constant address at - * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses - * from the end of virtual memory backwards. - */ -enum fixed_addresses { - /* Index 0 is reserved since fix_to_virt(0) == FIXADDR_TOP. */ - FIX_RESERVED, - /* - * Indexes using the page tables set up before entering __start_xen() - * must be among the first (L1_PAGETABLE_ENTRIES - 1) entries. - * These are generally those needed by the various console drivers. - */ - FIX_COM_BEGIN, - FIX_COM_END, - FIX_EHCI_DBGP, -#ifdef CONFIG_XEN_GUEST - FIX_PV_CONSOLE, - FIX_XEN_SHARED_INFO, -#endif /* CONFIG_XEN_GUEST */ - /* Everything else should go further down. 
*/ - FIX_APIC_BASE, - FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, - FIX_ACPI_BEGIN, - FIX_ACPI_END = FIX_ACPI_BEGIN + NUM_FIXMAP_ACPI_PAGES - 1, - FIX_HPET_BASE, - FIX_TBOOT_SHARED_BASE, - FIX_MSIX_IO_RESERV_BASE, - FIX_MSIX_IO_RESERV_END = FIX_MSIX_IO_RESERV_BASE + FIX_MSIX_MAX_PAGES -1, - FIX_TBOOT_MAP_ADDRESS, - FIX_APEI_RANGE_BASE, - FIX_APEI_RANGE_END = FIX_APEI_RANGE_BASE + FIX_APEI_RANGE_MAX -1, - FIX_EFI_MPF, - __end_of_fixed_addresses -}; - -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) - -extern void __set_fixmap( - enum fixed_addresses idx, unsigned long mfn, unsigned long flags); - -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR) - -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_UCMINUS) - -#define clear_fixmap(idx) __set_fixmap(idx, 0, 0) - -#define __fix_to_virt(x) gcc11_wrap(FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -#define fix_to_virt(x) ((void *)__fix_to_virt(x)) - -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} - -enum fixed_addresses_x { - /* Index 0 is reserved since fix_x_to_virt(0) == FIXADDR_X_TOP. */ - FIX_X_RESERVED, -#ifdef CONFIG_HYPERV_GUEST - FIX_X_HYPERV_HCALL, -#endif - __end_of_fixed_addresses_x -}; - -#define FIXADDR_X_SIZE (__end_of_fixed_addresses_x << PAGE_SHIFT) -#define FIXADDR_X_START (FIXADDR_X_TOP - FIXADDR_X_SIZE) - -extern void __set_fixmap_x( - enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags); - -#define set_fixmap_x(idx, phys) \ - __set_fixmap_x(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES) - -#define clear_fixmap_x(idx) __set_fixmap_x(idx, 0, 0) - -#define __fix_x_to_virt(x) (FIXADDR_X_TOP - ((x) << PAGE_SHIFT)) -#define fix_x_to_virt(x) ((void *)__fix_x_to_virt(x)) - -#endif /* __ASSEMBLY__ */ - -#endif diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h deleted file mode 100644 index 0be2273387..0000000000 --- a/xen/include/asm-x86/flushtlb.h +++ /dev/null @@ -1,203 +0,0 @@ -/****************************************************************************** - * flushtlb.h - * - * TLB flushes are timestamped using a global virtual 'clock' which ticks - * on any TLB flush on any processor. - * - * Copyright (c) 2003-2004, K A Fraser - */ - -#ifndef __FLUSHTLB_H__ -#define __FLUSHTLB_H__ - -#include -#include -#include -#include - -/* The current time as shown by the virtual TLB clock. */ -extern u32 tlbflush_clock; - -/* Time at which each CPU's TLB was last flushed. */ -DECLARE_PER_CPU(u32, tlbflush_time); - -/* TLB clock is in use. */ -extern bool tlb_clk_enabled; - -static inline uint32_t tlbflush_current_time(void) -{ - /* Returning 0 from tlbflush_current_time will always force a flush. */ - return tlb_clk_enabled ? tlbflush_clock : 0; -} - -static inline void page_set_tlbflush_timestamp(struct page_info *page) -{ - /* Avoid the write if the TLB clock is disabled. */ - if ( !tlb_clk_enabled ) - return; - - /* - * Prevent storing a stale time stamp, which could happen if an update - * to tlbflush_clock plus a subsequent flush IPI happen between the - * reading of tlbflush_clock and the writing of the struct page_info - * field. 
- */
-    ASSERT(local_irq_is_enabled());
-    local_irq_disable();
-    page->tlbflush_timestamp = tlbflush_current_time();
-    local_irq_enable();
-}
-
-/*
- * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
- * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
- * used for a purpose that may have caused the CPU's TLB to become tainted.
- */
-static inline bool NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
-{
-    u32 curr_time = tlbflush_current_time();
-    /*
-     * Two cases:
-     *  1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
-     *     safety during this period, we force a flush if @curr_time == 0.
-     *  2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
-     *     To detect false positives because @cpu_stamp has wrapped, we
-     *     also check @curr_time. If less than @lastuse_stamp we definitely
-     *     wrapped, so there's no need for a flush (one is forced every wrap).
-     */
-    return ((curr_time == 0) ||
-            ((cpu_stamp <= lastuse_stamp) &&
-             (lastuse_stamp <= curr_time)));
-}
-
-/*
- * Filter the given set of CPUs, removing those that definitely flushed their
- * TLB since @page_timestamp.
- */
-static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp)
-{
-    unsigned int cpu;
-
-    /* Short-circuit: there's no need to iterate if the clock is disabled. */
-    if ( !tlb_clk_enabled )
-        return;
-
-    for_each_cpu ( cpu, mask )
-        if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) )
-            __cpumask_clear_cpu(cpu, mask);
-}
-
-void new_tlbflush_clock_period(void);
-
-/* Read pagetable base. */
-static inline unsigned long read_cr3(void)
-{
-    unsigned long cr3;
-    __asm__ __volatile__ (
-        "mov %%cr3, %0" : "=r" (cr3) : );
-    return cr3;
-}
-
-/* Write pagetable base and implicitly tick the tlbflush clock. */
-void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);
-
-/* flush_* flag fields: */
- /*
-  * Area to flush: 2^flush_order pages. Default is flush entire address space.
-  * NB. Multi-page areas do not need to have been mapped with a superpage.
-  */
-#define FLUSH_ORDER_MASK 0xff
-#define FLUSH_ORDER(x) ((x)+1)
- /* Flush TLBs (or parts thereof) */
-#define FLUSH_TLB 0x100
- /* Flush TLBs (or parts thereof) including global mappings */
-#define FLUSH_TLB_GLOBAL 0x200
- /* Flush data caches */
-#define FLUSH_CACHE 0x400
- /* VA for the flush has a valid mapping */
-#define FLUSH_VA_VALID 0x800
- /* Flush CPU state */
-#define FLUSH_VCPU_STATE 0x1000
- /* Flush the per-cpu root page table */
-#define FLUSH_ROOT_PGTBL 0x2000
-#if CONFIG_HVM
- /* Flush all HVM guests linear TLB (using ASID/VPID) */
-#define FLUSH_HVM_ASID_CORE 0x4000
-#else
-#define FLUSH_HVM_ASID_CORE 0
-#endif
-#if defined(CONFIG_PV) || defined(CONFIG_SHADOW_PAGING)
-/*
- * Force an IPI to be sent. Note that adding this to the flags passed to
- * flush_area_mask will prevent using the assisted flush without having any
- * other side effect.
- */
-# define FLUSH_FORCE_IPI 0x8000
-#else
-# define FLUSH_FORCE_IPI 0
-#endif
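
As a usage note for the flag encoding above: the low byte carries the flush order plus one, and everything else is OR-ed in as capability bits. A standalone sketch of composing and decoding a request (constants copied from the definitions above; the rest is illustrative):

    #include <stdio.h>

    #define FLUSH_ORDER_MASK 0xff
    #define FLUSH_ORDER(x)   ((x)+1)
    #define FLUSH_TLB        0x100
    #define FLUSH_TLB_GLOBAL 0x200

    int main(void)
    {
        /* A single-page TLB flush, as flush_tlb_one_local() would build. */
        unsigned int flags = FLUSH_TLB | FLUSH_ORDER(0);
        unsigned int order = (flags & FLUSH_ORDER_MASK) - 1;

        printf("flags %#x -> flush 2^%u page(s), global: %s\n",
               flags, order, (flags & FLUSH_TLB_GLOBAL) ? "yes" : "no");
        return 0;
    }

-
-/* Flush local TLBs/caches.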
*/ -unsigned int flush_area_local(const void *va, unsigned int flags); -#define flush_local(flags) flush_area_local(NULL, flags) - -/* Flush specified CPUs' TLBs/caches */ -void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags); -#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags) - -/* Flush all CPUs' TLBs/caches */ -#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags) -#define flush_all(flags) flush_mask(&cpu_online_map, flags) - -/* Flush local TLBs */ -#define flush_tlb_local() \ - flush_local(FLUSH_TLB) -#define flush_tlb_one_local(v) \ - flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0)) - -/* Flush specified CPUs' TLBs */ -#define flush_tlb_mask(mask) \ - flush_mask(mask, FLUSH_TLB) -#define flush_tlb_one_mask(mask,v) \ - flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0)) - -/* - * Make the common code TLB flush helper force use of an IPI in order to be - * on the safe side. Note that not all calls from common code strictly require - * this. - */ -#define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI) - -/* Flush all CPUs' TLBs */ -#define flush_tlb_all() \ - flush_tlb_mask(&cpu_online_map) -#define flush_tlb_one_all(v) \ - flush_tlb_one_mask(&cpu_online_map, v) - -#define flush_root_pgtbl_domain(d) \ -{ \ - if ( is_pv_domain(d) && (d)->arch.pv.xpti ) \ - flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL); \ -} - -static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {} -static inline int invalidate_dcache_va_range(const void *p, - unsigned long size) -{ return -EOPNOTSUPP; } -static inline int clean_and_invalidate_dcache_va_range(const void *p, - unsigned long size) -{ - unsigned int order = get_order_from_bytes(size); - /* sub-page granularity support needs to be added if necessary */ - flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order)); - return 0; -} -static inline int clean_dcache_va_range(const void *p, unsigned long size) -{ - return clean_and_invalidate_dcache_va_range(p, size); -} - -unsigned int guest_flush_tlb_flags(const struct domain *d); -void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask); - -#endif /* __FLUSHTLB_H__ */ diff --git a/xen/include/asm-x86/genapic.h b/xen/include/asm-x86/genapic.h deleted file mode 100644 index 51a65d3e0f..0000000000 --- a/xen/include/asm-x86/genapic.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef _ASM_GENAPIC_H -#define _ASM_GENAPIC_H 1 - -/* - * Generic APIC driver interface. - * - * An straight forward mapping of the APIC related parts of the - * x86 subarchitecture interface to a dynamic object. - * - * This is used by the "generic" x86 subarchitecture. - * - * Copyright 2003 Andi Kleen, SuSE Labs. - */ - -struct mpc_config_translation; -struct mpc_config_bus; -struct mp_config_table; -struct mpc_config_processor; - -struct genapic { - const char *name; - int (*probe)(void); - - /* Interrupt delivery parameters ('physical' vs. 'logical flat'). 
*/ - int int_delivery_mode; - int int_dest_mode; - void (*init_apic_ldr)(void); - const cpumask_t *(*vector_allocation_cpumask)(int cpu); - unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); - void (*send_IPI_mask)(const cpumask_t *mask, int vector); - void (*send_IPI_self)(uint8_t vector); -}; - -#define APIC_INIT(aname, aprobe) \ - .name = aname, \ - .probe = aprobe - -extern struct genapic genapic; -extern const struct genapic apic_default; -extern const struct genapic apic_bigsmp; - -void send_IPI_self_legacy(uint8_t vector); - -void init_apic_ldr_flat(void); -unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask); -void send_IPI_mask_flat(const cpumask_t *mask, int vector); -const cpumask_t *vector_allocation_cpumask_flat(int cpu); -#define GENAPIC_FLAT \ - .int_delivery_mode = dest_LowestPrio, \ - .int_dest_mode = 1 /* logical delivery */, \ - .init_apic_ldr = init_apic_ldr_flat, \ - .vector_allocation_cpumask = vector_allocation_cpumask_flat, \ - .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \ - .send_IPI_mask = send_IPI_mask_flat, \ - .send_IPI_self = send_IPI_self_legacy - -void init_apic_ldr_phys(void); -unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask); -void send_IPI_mask_phys(const cpumask_t *mask, int vector); -const cpumask_t *vector_allocation_cpumask_phys(int cpu); -#define GENAPIC_PHYS \ - .int_delivery_mode = dest_Fixed, \ - .int_dest_mode = 0 /* physical delivery */, \ - .init_apic_ldr = init_apic_ldr_phys, \ - .vector_allocation_cpumask = vector_allocation_cpumask_phys, \ - .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \ - .send_IPI_mask = send_IPI_mask_phys, \ - .send_IPI_self = send_IPI_self_legacy - -#endif diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h deleted file mode 100644 index a8a21439a4..0000000000 --- a/xen/include/asm-x86/grant_table.h +++ /dev/null @@ -1,80 +0,0 @@ -/****************************************************************************** - * include/asm-x86/grant_table.h - * - * Copyright (c) 2004-2005 K A Fraser - */ - -#ifndef __ASM_GRANT_TABLE_H__ -#define __ASM_GRANT_TABLE_H__ - -#include - -#include -#include - -#define INITIAL_NR_GRANT_FRAMES 1U - -struct grant_table_arch { -}; - -static inline int create_grant_host_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, - unsigned int cache_flags) -{ - if ( paging_mode_external(current->domain) ) - return create_grant_p2m_mapping(addr, frame, flags, cache_flags); - return create_grant_pv_mapping(addr, frame, flags, cache_flags); -} - -static inline int replace_grant_host_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, - unsigned int flags) -{ - if ( paging_mode_external(current->domain) ) - return replace_grant_p2m_mapping(addr, frame, new_addr, flags); - return replace_grant_pv_mapping(addr, frame, new_addr, flags); -} - -#define gnttab_init_arch(gt) 0 -#define gnttab_destroy_arch(gt) do {} while ( 0 ) -#define gnttab_set_frame_gfn(gt, st, idx, gfn, mfn) \ - (gfn_eq(gfn, INVALID_GFN) \ - ? guest_physmap_remove_page((gt)->domain, \ - gnttab_get_frame_gfn(gt, st, idx), \ - mfn, 0) \ - : 0 /* Handled in add_to_physmap_one(). */) -#define gnttab_get_frame_gfn(gt, st, idx) ({ \ - mfn_t mfn_ = (st) ? gnttab_status_mfn(gt, idx) \ - : gnttab_shared_mfn(gt, idx); \ - unsigned long gpfn_ = get_gpfn_from_mfn(mfn_x(mfn_)); \ - VALID_M2P(gpfn_) ? 
_gfn(gpfn_) : INVALID_GFN; \ -}) - -#define gnttab_shared_mfn(t, i) _mfn(__virt_to_mfn((t)->shared_raw[i])) - -#define gnttab_shared_gfn(d, t, i) mfn_to_gfn(d, gnttab_shared_mfn(t, i)) - -#define gnttab_status_mfn(t, i) _mfn(__virt_to_mfn((t)->status[i])) - -#define gnttab_status_gfn(d, t, i) mfn_to_gfn(d, gnttab_status_mfn(t, i)) - -#define gnttab_mark_dirty(d, f) paging_mark_dirty(d, f) - -static inline void gnttab_clear_flags(struct domain *d, - unsigned int mask, uint16_t *addr) -{ - /* Access must be confined to the specified 2 bytes. */ - asm volatile ("lock andw %1,%0" : "+m" (*addr) : "ir" ((uint16_t)~mask)); -} - -/* Foreign mappings of HVM-guest pages do not modify the type count. */ -#define gnttab_host_mapping_get_page_type(ro, ld, rd) \ - (!(ro) && (((ld) == (rd)) || !paging_mode_external(rd))) - -/* Done implicitly when page tables are destroyed. */ -#define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) ) - -#define gnttab_need_iommu_mapping(d) \ - (!paging_mode_translate(d) && need_iommu_pt_sync(d)) - -#endif /* __ASM_GRANT_TABLE_H__ */ diff --git a/xen/include/asm-x86/guest.h b/xen/include/asm-x86/guest.h deleted file mode 100644 index ccf1ffbb72..0000000000 --- a/xen/include/asm-x86/guest.h +++ /dev/null @@ -1,39 +0,0 @@ -/****************************************************************************** - * asm-x86/guest.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2017 Citrix Systems Ltd. - */ - -#ifndef __X86_GUEST_H__ -#define __X86_GUEST_H__ - -#include -#include -#include -#include -#include -#include - -#endif /* __X86_GUEST_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/hyperv-hcall.h b/xen/include/asm-x86/guest/hyperv-hcall.h deleted file mode 100644 index 423ca0860b..0000000000 --- a/xen/include/asm-x86/guest/hyperv-hcall.h +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/hyperv-hcall.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2019 Microsoft. 
- */ - -#ifndef __X86_HYPERV_HCALL_H__ -#define __X86_HYPERV_HCALL_H__ - -#include -#include -#include - -#include -#include -#include - -static inline uint64_t hv_do_hypercall(uint64_t control, paddr_t input_addr, - paddr_t output_addr) -{ - uint64_t status; - register unsigned long r8 asm ( "r8" ) = output_addr; - - /* See TLFS for volatile registers */ - asm volatile ( "call hv_hcall_page" - : "=a" (status), "+c" (control), - "+d" (input_addr) ASM_CALL_CONSTRAINT - : "r" (r8) - : "memory" ); - - return status; -} - -static inline uint64_t hv_do_fast_hypercall(uint16_t code, - uint64_t input1, uint64_t input2) -{ - uint64_t status; - uint64_t control = code | HV_HYPERCALL_FAST_BIT; - register unsigned long r8 asm ( "r8" ) = input2; - - /* See TLFS for volatile registers */ - asm volatile ( "call hv_hcall_page" - : "=a" (status), "+c" (control), - "+d" (input1) ASM_CALL_CONSTRAINT - : "r" (r8) ); - - return status; -} - -static inline uint64_t hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, - uint16_t varhead_size, - paddr_t input, paddr_t output) -{ - uint64_t control = code; - uint64_t status; - uint16_t rep_comp; - - control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; - control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; - - do { - status = hv_do_hypercall(control, input, output); - if ( (status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS ) - break; - - rep_comp = MASK_EXTR(status, HV_HYPERCALL_REP_COMP_MASK); - - control &= ~HV_HYPERCALL_REP_START_MASK; - control |= MASK_INSR(rep_comp, HV_HYPERCALL_REP_START_MASK); - } while ( rep_comp < rep_count ); - - return status; -} - -#endif /* __X86_HYPERV_HCALL_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/hyperv-tlfs.h b/xen/include/asm-x86/guest/hyperv-tlfs.h deleted file mode 100644 index 03b71af82f..0000000000 --- a/xen/include/asm-x86/guest/hyperv-tlfs.h +++ /dev/null @@ -1,934 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* - * This file contains definitions from Hyper-V Hypervisor Top-Level Functional - * Specification (TLFS): - * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs - */ - -#ifndef _ASM_X86_HYPERV_TLFS_H -#define _ASM_X86_HYPERV_TLFS_H - -#include -#include -#include - -/* - * While not explicitly listed in the TLFS, Hyper-V always runs with a page size - * of 4096. These definitions are used when communicating with Hyper-V using - * guest physical pages and guest physical page addresses, since the guest page - * size may not be 4096 on all architectures. - */ -#define HV_HYP_PAGE_SHIFT 12 -#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT, UL) -#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) - -/* - * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent - * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). - */ -#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 -#define HYPERV_CPUID_INTERFACE 0x40000001 -#define HYPERV_CPUID_VERSION 0x40000002 -#define HYPERV_CPUID_FEATURES 0x40000003 -#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004 -#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005 -#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A - -#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000 -#define HYPERV_CPUID_MIN 0x40000005 -#define HYPERV_CPUID_MAX 0x4000ffff - -/* - * Feature identification. 
EAX indicates which features are available - * to the partition based upon the current partition privileges. - * These are HYPERV_CPUID_FEATURES.EAX bits. - */ - -/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE BIT(0, UL) -/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ -#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1, UL) -/* - * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM - * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available - */ -#define HV_X64_MSR_SYNIC_AVAILABLE BIT(2, UL) -/* - * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through - * HV_X64_MSR_STIMER3_COUNT) available - */ -#define HV_MSR_SYNTIMER_AVAILABLE BIT(3, UL) -/* - * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) - * are available - */ -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE BIT(4, UL) -/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ -#define HV_X64_MSR_HYPERCALL_AVAILABLE BIT(5, UL) -/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ -#define HV_X64_MSR_VP_INDEX_AVAILABLE BIT(6, UL) -/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ -#define HV_X64_MSR_RESET_AVAILABLE BIT(7, UL) -/* - * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, - * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, - * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available - */ -#define HV_X64_MSR_STAT_PAGES_AVAILABLE BIT(8, UL) -/* Partition reference TSC MSR is available */ -#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9, UL) -/* Partition Guest IDLE MSR is available */ -#define HV_X64_MSR_GUEST_IDLE_AVAILABLE BIT(10, UL) -/* - * There is a single feature flag that signifies if the partition has access - * to MSRs with local APIC and TSC frequencies. - */ -#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11, UL) -/* AccessReenlightenmentControls privilege */ -#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13, UL) - -/* - * Feature identification: indicates which flags were specified at partition - * creation. The format is the same as the partition creation flag structure - * defined in section Partition Creation Flags. - * These are HYPERV_CPUID_FEATURES.EBX bits. - */ -#define HV_X64_CREATE_PARTITIONS BIT(0, UL) -#define HV_X64_ACCESS_PARTITION_ID BIT(1, UL) -#define HV_X64_ACCESS_MEMORY_POOL BIT(2, UL) -#define HV_X64_ADJUST_MESSAGE_BUFFERS BIT(3, UL) -#define HV_X64_POST_MESSAGES BIT(4, UL) -#define HV_X64_SIGNAL_EVENTS BIT(5, UL) -#define HV_X64_CREATE_PORT BIT(6, UL) -#define HV_X64_CONNECT_PORT BIT(7, UL) -#define HV_X64_ACCESS_STATS BIT(8, UL) -#define HV_X64_DEBUGGING BIT(11, UL) -#define HV_X64_CPU_POWER_MANAGEMENT BIT(12, UL) - -/* - * Feature identification. EDX indicates which miscellaneous features - * are available to the partition. - * These are HYPERV_CPUID_FEATURES.EDX bits. 
- */ -/* The MWAIT instruction is available (per section MONITOR / MWAIT) */ -#define HV_X64_MWAIT_AVAILABLE BIT(0, UL) -/* Guest debugging support is available */ -#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1, UL) -/* Performance Monitor support is available*/ -#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2, UL) -/* Support for physical CPU dynamic partitioning events is available*/ -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3, UL) -/* - * Support for passing hypercall input parameter block via XMM - * registers is available - */ -#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4, UL) -/* Support for a virtual guest idle state is available */ -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5, UL) -/* Frequency MSRs available */ -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8, UL) -/* Crash MSR available */ -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10, UL) -/* stimer Direct Mode is available */ -#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19, UL) - -/* - * Implementation recommendations. Indicates which behaviors the hypervisor - * recommends the OS implement for optimal performance. - * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits. - */ -/* - * Recommend using hypercall for address space switches rather - * than MOV to CR3 instruction - */ -#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0, UL) -/* Recommend using hypercall for local TLB flushes rather - * than INVLPG or MOV to CR3 instructions */ -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1, UL) -/* - * Recommend using hypercall for remote TLB flushes rather - * than inter-processor interrupts - */ -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2, UL) -/* - * Recommend using MSRs for accessing APIC registers - * EOI, ICR and TPR rather than their memory-mapped counterparts - */ -#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3, UL) -/* Recommend using the hypervisor-provided MSR to initiate a system RESET */ -#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4, UL) -/* - * Recommend using relaxed timing for this partition. If used, - * the VM should disable any watchdog timeouts that rely on the - * timely delivery of external interrupts - */ -#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5, UL) - -/* - * Recommend not using Auto End-Of-Interrupt feature - */ -#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9, UL) - -/* - * Recommend using cluster IPI hypercalls. - */ -#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10, UL) - -/* Recommend using the newer ExProcessorMasks interface */ -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11, UL) - -/* Recommend using enlightened VMCS */ -#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14, UL) - -/* - * Virtual processor will never share a physical core with another virtual - * processor, except for virtual processors that are reported as sibling SMT - * threads. - */ -#define HV_X64_NO_NONARCH_CORESHARING BIT(18, UL) - -/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ -#define HV_X64_NESTED_DIRECT_FLUSH BIT(17, UL) -#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18, UL) -#define HV_X64_NESTED_MSR_BITMAP BIT(19, UL) - -/* Hyper-V specific model specific registers (MSRs) */ - -/* MSR used to identify the guest OS. */ -#define HV_X64_MSR_GUEST_OS_ID 0x40000000 - -/* MSR used to setup pages used to communicate with the hypervisor. */ -#define HV_X64_MSR_HYPERCALL 0x40000001 - -/* MSR used to provide vcpu index */ -#define HV_X64_MSR_VP_INDEX 0x40000002 - -/* MSR used to reset the guest OS. 
*/ -#define HV_X64_MSR_RESET 0x40000003 - -/* MSR used to provide vcpu runtime in 100ns units */ -#define HV_X64_MSR_VP_RUNTIME 0x40000010 - -/* MSR used to read the per-partition time reference counter */ -#define HV_X64_MSR_TIME_REF_COUNT 0x40000020 - -/* A partition's reference time stamp counter (TSC) page */ -#define HV_X64_MSR_REFERENCE_TSC 0x40000021 - -/* MSR used to retrieve the TSC frequency */ -#define HV_X64_MSR_TSC_FREQUENCY 0x40000022 - -/* MSR used to retrieve the local APIC timer frequency */ -#define HV_X64_MSR_APIC_FREQUENCY 0x40000023 - -/* Define the virtual APIC registers */ -#define HV_X64_MSR_EOI 0x40000070 -#define HV_X64_MSR_ICR 0x40000071 -#define HV_X64_MSR_TPR 0x40000072 -#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 - -/* Define synthetic interrupt controller model specific registers. */ -#define HV_X64_MSR_SCONTROL 0x40000080 -#define HV_X64_MSR_SVERSION 0x40000081 -#define HV_X64_MSR_SIEFP 0x40000082 -#define HV_X64_MSR_SIMP 0x40000083 -#define HV_X64_MSR_EOM 0x40000084 -#define HV_X64_MSR_SINT0 0x40000090 -#define HV_X64_MSR_SINT1 0x40000091 -#define HV_X64_MSR_SINT2 0x40000092 -#define HV_X64_MSR_SINT3 0x40000093 -#define HV_X64_MSR_SINT4 0x40000094 -#define HV_X64_MSR_SINT5 0x40000095 -#define HV_X64_MSR_SINT6 0x40000096 -#define HV_X64_MSR_SINT7 0x40000097 -#define HV_X64_MSR_SINT8 0x40000098 -#define HV_X64_MSR_SINT9 0x40000099 -#define HV_X64_MSR_SINT10 0x4000009A -#define HV_X64_MSR_SINT11 0x4000009B -#define HV_X64_MSR_SINT12 0x4000009C -#define HV_X64_MSR_SINT13 0x4000009D -#define HV_X64_MSR_SINT14 0x4000009E -#define HV_X64_MSR_SINT15 0x4000009F - -/* - * Synthetic Timer MSRs. Four timers per vcpu. - */ -#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 -#define HV_X64_MSR_STIMER0_COUNT 0x400000B1 -#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 -#define HV_X64_MSR_STIMER1_COUNT 0x400000B3 -#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 -#define HV_X64_MSR_STIMER2_COUNT 0x400000B5 -#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 -#define HV_X64_MSR_STIMER3_COUNT 0x400000B7 - -/* Hyper-V guest idle MSR */ -#define HV_X64_MSR_GUEST_IDLE 0x400000F0 - -/* Hyper-V guest crash notification MSR's */ -#define HV_X64_MSR_CRASH_P0 0x40000100 -#define HV_X64_MSR_CRASH_P1 0x40000101 -#define HV_X64_MSR_CRASH_P2 0x40000102 -#define HV_X64_MSR_CRASH_P3 0x40000103 -#define HV_X64_MSR_CRASH_P4 0x40000104 -#define HV_X64_MSR_CRASH_CTL 0x40000105 - -/* TSC emulation after migration */ -#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 -#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 -#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 - -/* - * Declare the MSR used to setup pages used to communicate with the hypervisor. - */ -union hv_x64_msr_hypercall_contents { - u64 as_uint64; - struct { - u64 enable:1; - u64 reserved:11; - u64 guest_physical_address:52; - }; -}; - -/* - * TSC page layout. - */ -struct ms_hyperv_tsc_page { - volatile u32 tsc_sequence; - u32 reserved1; - volatile u64 tsc_scale; - volatile s64 tsc_offset; - u64 reserved2[509]; -}; - -/* - * The guest OS needs to register the guest ID with the hypervisor. - * The guest ID is a 64 bit entity and the structure of this ID is - * specified in the Hyper-V specification: - * - * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx - * - * While the current guideline does not specify how Linux guest ID(s) - * need to be generated, our plan is to publish the guidelines for - * Linux and other guest operating systems that currently are hosted - * on Hyper-V. 
The implementation here conforms to these yet - * unpublished guidelines. - * - * - * Bit(s) - * 63 - Indicates if the OS is Open Source or not; 1 is Open Source - * 62:56 - Os Type; Linux 0x1, FreeBSD 0x2, Xen 0x3 - * 55:48 - Distro specific identification - * 47:16 - Guest OS version number - * 15:0 - Distro specific identification - * - * - */ - -#define HV_LINUX_VENDOR_ID 0x8100 -#define HV_XEN_VENDOR_ID 0x8300 -union hv_guest_os_id -{ - uint64_t raw; - struct - { - uint64_t build_number:16; - uint64_t service_pack:8; - uint64_t minor:8; - uint64_t major:8; - uint64_t os:8; - uint64_t vendor:16; - }; -}; - -struct hv_reenlightenment_control { - __u64 vector:8; - __u64 reserved1:8; - __u64 enabled:1; - __u64 reserved2:15; - __u64 target_vp:32; -}; - -struct hv_tsc_emulation_control { - __u64 enabled:1; - __u64 reserved:63; -}; - -struct hv_tsc_emulation_status { - __u64 inprogress:1; - __u64 reserved:63; -}; - -#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 -#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 -#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ - (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) - -/* - * Crash notification (HV_X64_MSR_CRASH_CTL) flags. - */ -#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) -#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) -#define HV_X64_MSR_CRASH_PARAMS \ - (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) - -#define HV_IPI_LOW_VECTOR 0x10 -#define HV_IPI_HIGH_VECTOR 0xff - -/* Declare the various hypercall operations. */ -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 -#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 -#define HVCALL_SEND_IPI 0x000b -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 -#define HVCALL_SEND_IPI_EX 0x0015 -#define HVCALL_POST_MESSAGE 0x005c -#define HVCALL_SIGNAL_EVENT 0x005d -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 -#define HVCALL_EXT_CALL_QUERY_CAPABILITIES 0x8001 - -#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001 -#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12 -#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \ - (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) - -/* Hyper-V Enlightened VMCS version mask in nested features CPUID */ -#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff - -#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001 -#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12 - -#define HV_PROCESSOR_POWER_STATE_C0 0 -#define HV_PROCESSOR_POWER_STATE_C1 1 -#define HV_PROCESSOR_POWER_STATE_C2 2 -#define HV_PROCESSOR_POWER_STATE_C3 3 - -#define HV_FLUSH_ALL_PROCESSORS BIT(0, UL) -#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1, UL) -#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2, UL) -#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3, UL) - -enum HV_GENERIC_SET_FORMAT { - HV_GENERIC_SET_SPARSE_4K, - HV_GENERIC_SET_ALL, -}; - -#define HV_HYPERCALL_RESULT_MASK 0xffff /* GENMASK_ULL(15, 0) */ -#define HV_HYPERCALL_FAST_BIT BIT(16, UL) -#define HV_HYPERCALL_VARHEAD_OFFSET 17 -#define HV_HYPERCALL_REP_COMP_OFFSET 32 -#define HV_HYPERCALL_REP_COMP_MASK (0xfffULL << HV_HYPERCALL_REP_COMP_OFFSET) /* GENMASK_ULL(43, 32) */ -#define HV_HYPERCALL_REP_START_OFFSET 48 -#define HV_HYPERCALL_REP_START_MASK (0xfffULL << HV_HYPERCALL_REP_START_OFFSET) /* GENMASK_ULL(59, 48) */ - -/* hypercall status code */ -#define HV_STATUS_SUCCESS 0 -#define HV_STATUS_INVALID_HYPERCALL_CODE 2 -#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 
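/*
 * Illustrative sketch, hypothetical helpers not in the moved header: the
 * low 16 bits of the status quadword returned by hv_do_hypercall() carry
 * one of the HV_STATUS_* codes in this block, and the reps-completed count
 * of a rep hypercall sits at bits 43:32, exactly as consumed by the loop
 * in hv_do_rep_hypercall() above.  MASK_EXTR() comes from xen/lib.h.
 */
static inline bool hv_status_succeeded(uint64_t status)
{
    return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_status_reps_completed(uint64_t status)
{
    return MASK_EXTR(status, HV_HYPERCALL_REP_COMP_MASK);
}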
-#define HV_STATUS_INVALID_ALIGNMENT 4 -#define HV_STATUS_INVALID_PARAMETER 5 -#define HV_STATUS_INSUFFICIENT_MEMORY 11 -#define HV_STATUS_INVALID_PORT_ID 17 -#define HV_STATUS_INVALID_CONNECTION_ID 18 -#define HV_STATUS_INSUFFICIENT_BUFFERS 19 - -/* - * The Hyper-V TimeRefCount register and the TSC - * page provide a guest VM clock with 100ns tick rate - */ -#define HV_CLOCK_HZ (NSEC_PER_SEC/100) - -typedef struct _HV_REFERENCE_TSC_PAGE { - __u32 tsc_sequence; - __u32 res1; - __u64 tsc_scale; - __s64 tsc_offset; -} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; - -/* Define the number of synthetic interrupt sources. */ -#define HV_SYNIC_SINT_COUNT (16) -/* Define the expected SynIC version. */ -#define HV_SYNIC_VERSION_1 (0x1) -/* Valid SynIC vectors are 16-255. */ -#define HV_SYNIC_FIRST_VALID_VECTOR (16) - -#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) -#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) -#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) -#define HV_SYNIC_SINT_MASKED (1ULL << 16) -#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) -#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) - -#define HV_SYNIC_STIMER_COUNT (4) - -/* Define synthetic interrupt controller message constants. */ -#define HV_MESSAGE_SIZE (256) -#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) -#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) - -/* Define hypervisor message types. */ -enum hv_message_type { - HVMSG_NONE = 0x00000000, - - /* Memory access messages. */ - HVMSG_UNMAPPED_GPA = 0x80000000, - HVMSG_GPA_INTERCEPT = 0x80000001, - - /* Timer notification messages. */ - HVMSG_TIMER_EXPIRED = 0x80000010, - - /* Error messages. */ - HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, - HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, - HVMSG_UNSUPPORTED_FEATURE = 0x80000022, - - /* Trace buffer complete messages. */ - HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, - - /* Platform-specific processor intercept messages. */ - HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, - HVMSG_X64_MSR_INTERCEPT = 0x80010001, - HVMSG_X64_CPUID_INTERCEPT = 0x80010002, - HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, - HVMSG_X64_APIC_EOI = 0x80010004, - HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 -}; - -/* Define synthetic interrupt controller message flags. */ -union hv_message_flags { - __u8 asu8; - struct { - __u8 msg_pending:1; - __u8 reserved:7; - }; -}; - -/* Define port identifier type. */ -union hv_port_id { - __u32 asu32; - struct { - __u32 id:24; - __u32 reserved:8; - } u; -}; - -/* Define synthetic interrupt controller message header. */ -struct hv_message_header { - __u32 message_type; - __u8 payload_size; - union hv_message_flags message_flags; - __u8 reserved[2]; - union { - __u64 sender; - union hv_port_id port; - }; -}; - -/* Define synthetic interrupt controller message format. */ -struct hv_message { - struct hv_message_header header; - union { - __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; - } u; -}; - -/* Define the synthetic interrupt message page layout. */ -struct hv_message_page { - struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -}; - -/* Define timer message payload structure. 
*/ -struct hv_timer_message_payload { - __u32 timer_index; - __u32 reserved; - __u64 expiration_time; /* When the timer expired */ - __u64 delivery_time; /* When the message was delivered */ -}; - -struct hv_nested_enlightenments_control { - struct { - __u32 directhypercall:1; - __u32 reserved:31; - } features; - struct { - __u32 reserved; - } hypercallControls; -}; - -union hv_vp_assist_page_msr -{ - uint64_t raw; - struct - { - uint64_t enabled:1; - uint64_t reserved_preserved:11; - uint64_t pfn:48; - }; -}; - -/* Define virtual processor assist page structure. */ -struct hv_vp_assist_page { - __u32 apic_assist; - __u32 reserved1; - __u64 vtl_control[3]; - struct hv_nested_enlightenments_control nested_control; - __u8 enlighten_vmentry; - __u8 reserved2[7]; - __u64 current_nested_vmcs; -}; - -struct hv_enlightened_vmcs { - u32 revision_id; - u32 abort; - - u16 host_es_selector; - u16 host_cs_selector; - u16 host_ss_selector; - u16 host_ds_selector; - u16 host_fs_selector; - u16 host_gs_selector; - u16 host_tr_selector; - - u16 padding16_1; - - u64 host_ia32_pat; - u64 host_ia32_efer; - - u64 host_cr0; - u64 host_cr3; - u64 host_cr4; - - u64 host_ia32_sysenter_esp; - u64 host_ia32_sysenter_eip; - u64 host_rip; - u32 host_ia32_sysenter_cs; - - u32 pin_based_vm_exec_control; - u32 vm_exit_controls; - u32 secondary_vm_exec_control; - - u64 io_bitmap_a; - u64 io_bitmap_b; - u64 msr_bitmap; - - u16 guest_es_selector; - u16 guest_cs_selector; - u16 guest_ss_selector; - u16 guest_ds_selector; - u16 guest_fs_selector; - u16 guest_gs_selector; - u16 guest_ldtr_selector; - u16 guest_tr_selector; - - u32 guest_es_limit; - u32 guest_cs_limit; - u32 guest_ss_limit; - u32 guest_ds_limit; - u32 guest_fs_limit; - u32 guest_gs_limit; - u32 guest_ldtr_limit; - u32 guest_tr_limit; - u32 guest_gdtr_limit; - u32 guest_idtr_limit; - - u32 guest_es_ar_bytes; - u32 guest_cs_ar_bytes; - u32 guest_ss_ar_bytes; - u32 guest_ds_ar_bytes; - u32 guest_fs_ar_bytes; - u32 guest_gs_ar_bytes; - u32 guest_ldtr_ar_bytes; - u32 guest_tr_ar_bytes; - - u64 guest_es_base; - u64 guest_cs_base; - u64 guest_ss_base; - u64 guest_ds_base; - u64 guest_fs_base; - u64 guest_gs_base; - u64 guest_ldtr_base; - u64 guest_tr_base; - u64 guest_gdtr_base; - u64 guest_idtr_base; - - u64 padding64_1[3]; - - u64 vm_exit_msr_store_addr; - u64 vm_exit_msr_load_addr; - u64 vm_entry_msr_load_addr; - - u64 cr3_target_value0; - u64 cr3_target_value1; - u64 cr3_target_value2; - u64 cr3_target_value3; - - u32 page_fault_error_code_mask; - u32 page_fault_error_code_match; - - u32 cr3_target_count; - u32 vm_exit_msr_store_count; - u32 vm_exit_msr_load_count; - u32 vm_entry_msr_load_count; - - u64 tsc_offset; - u64 virtual_apic_page_addr; - u64 vmcs_link_pointer; - - u64 guest_ia32_debugctl; - u64 guest_ia32_pat; - u64 guest_ia32_efer; - - u64 guest_pdptr0; - u64 guest_pdptr1; - u64 guest_pdptr2; - u64 guest_pdptr3; - - u64 guest_pending_dbg_exceptions; - u64 guest_sysenter_esp; - u64 guest_sysenter_eip; - - u32 guest_activity_state; - u32 guest_sysenter_cs; - - u64 cr0_guest_host_mask; - u64 cr4_guest_host_mask; - u64 cr0_read_shadow; - u64 cr4_read_shadow; - u64 guest_cr0; - u64 guest_cr3; - u64 guest_cr4; - u64 guest_dr7; - - u64 host_fs_base; - u64 host_gs_base; - u64 host_tr_base; - u64 host_gdtr_base; - u64 host_idtr_base; - u64 host_rsp; - - u64 ept_pointer; - - u16 virtual_processor_id; - u16 padding16_2[3]; - - u64 padding64_2[5]; - u64 guest_physical_address; - - u32 vm_instruction_error; - u32 vm_exit_reason; - u32 vm_exit_intr_info; - u32 
vm_exit_intr_error_code; - u32 idt_vectoring_info_field; - u32 idt_vectoring_error_code; - u32 vm_exit_instruction_len; - u32 vmx_instruction_info; - - u64 exit_qualification; - u64 exit_io_instruction_ecx; - u64 exit_io_instruction_esi; - u64 exit_io_instruction_edi; - u64 exit_io_instruction_eip; - - u64 guest_linear_address; - u64 guest_rsp; - u64 guest_rflags; - - u32 guest_interruptibility_info; - u32 cpu_based_vm_exec_control; - u32 exception_bitmap; - u32 vm_entry_controls; - u32 vm_entry_intr_info_field; - u32 vm_entry_exception_error_code; - u32 vm_entry_instruction_len; - u32 tpr_threshold; - - u64 guest_rip; - - u32 hv_clean_fields; - u32 hv_padding_32; - u32 hv_synthetic_controls; - struct { - u32 nested_flush_hypercall:1; - u32 msr_bitmap:1; - u32 reserved:30; - } hv_enlightenments_control; - u32 hv_vp_id; - - u64 hv_vm_id; - u64 partition_assist_page; - u64 padding64_4[4]; - u64 guest_bndcfgs; - u64 padding64_5[7]; - u64 xss_exit_bitmap; - u64 padding64_6[7]; -}; - -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0 -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14, UL) -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15, UL) - -#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF - -/* Define synthetic interrupt controller flag constants. */ -#define HV_EVENT_FLAGS_COUNT (256 * 8) -#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) - -/* - * Synthetic timer configuration. - */ -union hv_stimer_config { - u64 as_uint64; - struct { - u64 enable:1; - u64 periodic:1; - u64 lazy:1; - u64 auto_enable:1; - u64 apic_vector:8; - u64 direct_mode:1; - u64 reserved_z0:3; - u64 sintx:4; - u64 reserved_z1:44; - }; -}; - - -/* Define the synthetic interrupt controller event flags format. */ -union hv_synic_event_flags { - unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; -}; - -/* Define SynIC control register. */ -union hv_synic_scontrol { - u64 as_uint64; - struct { - u64 enable:1; - u64 reserved:63; - }; -}; - -/* Define synthetic interrupt source. 
*/ -union hv_synic_sint { - u64 as_uint64; - struct { - u64 vector:8; - u64 reserved1:8; - u64 masked:1; - u64 auto_eoi:1; - u64 polling:1; - u64 reserved2:45; - }; -}; - -/* Define the format of the SIMP register */ -union hv_synic_simp { - u64 as_uint64; - struct { - u64 simp_enabled:1; - u64 preserved:11; - u64 base_simp_gpa:52; - }; -}; - -/* Define the format of the SIEFP register */ -union hv_synic_siefp { - u64 as_uint64; - struct { - u64 siefp_enabled:1; - u64 preserved:11; - u64 base_siefp_gpa:52; - }; -}; - -struct hv_vpset { - u64 format; - u64 valid_bank_mask; - u64 bank_contents[]; -}; - -/* HvCallSendSyntheticClusterIpi hypercall */ -struct hv_send_ipi { - u32 vector; - u32 reserved; - u64 cpu_mask; -}; - -/* HvCallSendSyntheticClusterIpiEx hypercall */ -struct hv_send_ipi_ex { - u32 vector; - u32 reserved; - struct hv_vpset vp_set; -}; - -/* HvFlushGuestPhysicalAddressSpace hypercalls */ -struct hv_guest_mapping_flush { - u64 address_space; - u64 flags; -}; - -/* - * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited - * by the bitwidth of "additional_pages" in union hv_gpa_page_range. - */ -#define HV_MAX_FLUSH_PAGES (2048) - -/* HvFlushGuestPhysicalAddressList hypercall */ -union hv_gpa_page_range { - u64 address_space; - struct { - u64 additional_pages:11; - u64 largepage:1; - u64 basepfn:52; - } page; -}; - -/* - * All input flush parameters should be in single page. The max flush - * count is equal with how many entries of union hv_gpa_page_range can - * be populated into the input parameter page. - */ -#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ - sizeof(union hv_gpa_page_range)) - -struct hv_guest_mapping_flush_list { - u64 address_space; - u64 flags; - union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; -}; - -/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ -struct hv_tlb_flush { - u64 address_space; - u64 flags; - u64 processor_mask; - u64 gva_list[]; -}; - -/* HvFlushVirtualAddressSpaceEx hypercall */ -struct hv_tlb_flush_ex { - u64 address_space; - u64 flags; - struct hv_vpset hv_vp_set; - /* u64 gva_list[]; */ -}; - -struct hv_partition_assist_pg { - u32 tlb_lock_count; -}; -#endif diff --git a/xen/include/asm-x86/guest/hyperv.h b/xen/include/asm-x86/guest/hyperv.h deleted file mode 100644 index 1a1b47831c..0000000000 --- a/xen/include/asm-x86/guest/hyperv.h +++ /dev/null @@ -1,86 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/hyperv.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2019 Microsoft. 
- */ - -#ifndef __X86_GUEST_HYPERV_H__ -#define __X86_GUEST_HYPERV_H__ - -#include - -/* Use top-most MFN for hypercall page */ -#define HV_HCALL_MFN (((1ull << paddr_bits) - 1) >> HV_HYP_PAGE_SHIFT) - -/* - * The specification says: "The partition reference time is computed - * by the following formula: - * - * ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset - * - * The multiplication is a 64 bit multiplication, which results in a - * 128 bit number which is then shifted 64 times to the right to obtain - * the high 64 bits." - */ -static inline uint64_t hv_scale_tsc(uint64_t tsc, uint64_t scale, - int64_t offset) -{ - uint64_t result; - - /* - * Quadword MUL takes an implicit operand in RAX, and puts the result - * in RDX:RAX. Because we only want the result of the multiplication - * after shifting right by 64 bits, we therefore only need the content - * of RDX. - */ - asm ( "mulq %[scale]" - : "+a" (tsc), "=d" (result) - : [scale] "rm" (scale) ); - - return result + offset; -} - -#ifdef CONFIG_HYPERV_GUEST - -#include - -struct ms_hyperv_info { - uint32_t features; - uint32_t misc_features; - uint32_t hints; - uint32_t nested_features; - uint32_t max_vp_index; - uint32_t max_lp_index; -}; -extern struct ms_hyperv_info ms_hyperv; - -const struct hypervisor_ops *hyperv_probe(void); - -#else - -static inline const struct hypervisor_ops *hyperv_probe(void) { return NULL; } - -#endif /* CONFIG_HYPERV_GUEST */ -#endif /* __X86_GUEST_HYPERV_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/hypervisor.h b/xen/include/asm-x86/guest/hypervisor.h deleted file mode 100644 index 0a6c3b47ab..0000000000 --- a/xen/include/asm-x86/guest/hypervisor.h +++ /dev/null @@ -1,85 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/hypervisor.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2019 Microsoft. - */ - -#ifndef __X86_HYPERVISOR_H__ -#define __X86_HYPERVISOR_H__ - -#include - -#include - -struct hypervisor_ops { - /* Name of the hypervisor */ - const char *name; - /* Main setup routine */ - void (*setup)(void); - /* AP setup */ - int (*ap_setup)(void); - /* Resume from suspension */ - void (*resume)(void); - /* Fix up e820 map */ - void (*e820_fixup)(struct e820map *e820); - /* L0 assisted TLB flush */ - int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int flags); -}; - -#ifdef CONFIG_GUEST - -const char *hypervisor_probe(void); -void hypervisor_setup(void); -int hypervisor_ap_setup(void); -void hypervisor_resume(void); -void hypervisor_e820_fixup(struct e820map *e820); -/* - * L0 assisted TLB flush. - * mask: cpumask of the dirty vCPUs that should be flushed. - * va: linear address to flush, or NULL for entire address space. - * flags: flags for flushing, including the order of va. 
- */ -int hypervisor_flush_tlb(const cpumask_t *mask, const void *va, - unsigned int flags); - -#else - -#include -#include - -static inline const char *hypervisor_probe(void) { return NULL; } -static inline void hypervisor_setup(void) { ASSERT_UNREACHABLE(); } -static inline int hypervisor_ap_setup(void) { return 0; } -static inline void hypervisor_resume(void) { ASSERT_UNREACHABLE(); } -static inline void hypervisor_e820_fixup(struct e820map *e820) {} -static inline int hypervisor_flush_tlb(const cpumask_t *mask, const void *va, - unsigned int flags) -{ - return -EOPNOTSUPP; -} - -#endif /* CONFIG_GUEST */ - -#endif /* __X86_HYPERVISOR_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/pvh-boot.h b/xen/include/asm-x86/guest/pvh-boot.h deleted file mode 100644 index 48ffd1a0b1..0000000000 --- a/xen/include/asm-x86/guest/pvh-boot.h +++ /dev/null @@ -1,58 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/pvh-boot.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2017 Citrix Systems Ltd. - */ - -#ifndef __X86_PVH_BOOT_H__ -#define __X86_PVH_BOOT_H__ - -#include - -#ifdef CONFIG_PVH_GUEST - -extern bool pvh_boot; - -void pvh_init(multiboot_info_t **mbi, module_t **mod); -void pvh_print_info(void); - -#else - -#include - -#define pvh_boot 0 - -static inline void pvh_init(multiboot_info_t **mbi, module_t **mod) -{ - ASSERT_UNREACHABLE(); -} - -static inline void pvh_print_info(void) -{ - ASSERT_UNREACHABLE(); -} - -#endif /* CONFIG_PVH_GUEST */ -#endif /* __X86_PVH_BOOT_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/xen-hcall.h b/xen/include/asm-x86/guest/xen-hcall.h deleted file mode 100644 index 03d5868a9e..0000000000 --- a/xen/include/asm-x86/guest/xen-hcall.h +++ /dev/null @@ -1,212 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/xen-hcall.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2017 Citrix Systems Ltd. 
- */ - -#ifndef __X86_XEN_HYPERCALL_H__ -#define __X86_XEN_HYPERCALL_H__ - -#ifdef CONFIG_XEN_GUEST - -#include - -#include - -#include -#include -#include - -#include - -/* - * Hypercall primatives for 64bit - * - * Inputs: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6) - */ - -#define _hypercall64_1(type, hcall, a1) \ - ({ \ - long res, tmp__; \ - asm volatile ( \ - "call hypercall_page + %c[offset]" \ - : "=a" (res), "=D" (tmp__) ASM_CALL_CONSTRAINT \ - : [offset] "i" (hcall * 32), \ - "1" ((long)(a1)) \ - : "memory" ); \ - (type)res; \ - }) - -#define _hypercall64_2(type, hcall, a1, a2) \ - ({ \ - long res, tmp__; \ - asm volatile ( \ - "call hypercall_page + %c[offset]" \ - : "=a" (res), "=D" (tmp__), "=S" (tmp__) \ - ASM_CALL_CONSTRAINT \ - : [offset] "i" (hcall * 32), \ - "1" ((long)(a1)), "2" ((long)(a2)) \ - : "memory" ); \ - (type)res; \ - }) - -#define _hypercall64_3(type, hcall, a1, a2, a3) \ - ({ \ - long res, tmp__; \ - asm volatile ( \ - "call hypercall_page + %c[offset]" \ - : "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__) \ - ASM_CALL_CONSTRAINT \ - : [offset] "i" (hcall * 32), \ - "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \ - : "memory" ); \ - (type)res; \ - }) - -#define _hypercall64_4(type, hcall, a1, a2, a3, a4) \ - ({ \ - long res, tmp__; \ - register long _a4 asm ("r10") = ((long)(a4)); \ - asm volatile ( \ - "call hypercall_page + %c[offset]" \ - : "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__), \ - "=&r" (tmp__) ASM_CALL_CONSTRAINT \ - : [offset] "i" (hcall * 32), \ - "1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \ - "4" (_a4) \ - : "memory" ); \ - (type)res; \ - }) - -/* - * Primitive Hypercall wrappers - */ -static inline long xen_hypercall_sched_op(unsigned int cmd, void *arg) -{ - return _hypercall64_2(long, __HYPERVISOR_sched_op, cmd, arg); -} - -static inline long xen_hypercall_memory_op(unsigned int cmd, void *arg) -{ - return _hypercall64_2(long, __HYPERVISOR_memory_op, cmd, arg); -} - -static inline int xen_hypercall_vcpu_op(unsigned int cmd, unsigned int vcpu, - void *arg) -{ - return _hypercall64_3(long, __HYPERVISOR_vcpu_op, cmd, vcpu, arg); -} - -static inline long xen_hypercall_event_channel_op(unsigned int cmd, void *arg) -{ - return _hypercall64_2(long, __HYPERVISOR_event_channel_op, cmd, arg); -} - -static inline long xen_hypercall_grant_table_op(unsigned int cmd, void *arg, - unsigned int count) -{ - return _hypercall64_3(long, __HYPERVISOR_grant_table_op, cmd, arg, count); -} - -static inline long xen_hypercall_hvm_op(unsigned int op, void *arg) -{ - return _hypercall64_2(long, __HYPERVISOR_hvm_op, op, arg); -} - -/* - * Higher level hypercall helpers - */ -static inline void xen_hypercall_console_write( - const char *buf, unsigned int count) -{ - (void)_hypercall64_3(long, __HYPERVISOR_console_io, - CONSOLEIO_write, count, buf); -} - -static inline long xen_hypercall_shutdown(unsigned int reason) -{ - struct sched_shutdown s = { .reason = reason }; - return xen_hypercall_sched_op(SCHEDOP_shutdown, &s); -} - -static inline long xen_hypercall_evtchn_send(evtchn_port_t port) -{ - struct evtchn_send send = { .port = port }; - - return xen_hypercall_event_channel_op(EVTCHNOP_send, &send); -} - -static inline long xen_hypercall_evtchn_unmask(evtchn_port_t port) -{ - struct evtchn_unmask unmask = { .port = port }; - - return xen_hypercall_event_channel_op(EVTCHNOP_unmask, &unmask); -} - -static inline long xen_hypercall_hvm_get_param(uint32_t index, uint64_t *value) -{ - struct xen_hvm_param xhv = { - .domid = 
DOMID_SELF, - .index = index, - }; - long ret = xen_hypercall_hvm_op(HVMOP_get_param, &xhv); - - if ( ret == 0 ) - *value = xhv.value; - - return ret; -} - -static inline long xen_hypercall_set_evtchn_upcall_vector( - unsigned int cpu, unsigned int vector) -{ - struct xen_hvm_evtchn_upcall_vector a = { - .vcpu = cpu, - .vector = vector, - }; - - return xen_hypercall_hvm_op(HVMOP_set_evtchn_upcall_vector, &a); -} - -#else /* CONFIG_XEN_GUEST */ - -#include - -#include - -static inline void xen_hypercall_console_write( - const char *buf, unsigned int count) -{ - ASSERT_UNREACHABLE(); -} - -static inline long xen_hypercall_shutdown(unsigned int reason) -{ - ASSERT_UNREACHABLE(); - return 0; -} - -#endif /* CONFIG_XEN_GUEST */ -#endif /* __X86_XEN_HYPERCALL_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest/xen.h b/xen/include/asm-x86/guest/xen.h deleted file mode 100644 index 2042a9a0c2..0000000000 --- a/xen/include/asm-x86/guest/xen.h +++ /dev/null @@ -1,61 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/xen.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2017 Citrix Systems Ltd. - */ - -#ifndef __X86_GUEST_XEN_H__ -#define __X86_GUEST_XEN_H__ - -#include - -#include -#include -#include - -#define XEN_shared_info ((struct shared_info *)fix_to_virt(FIX_XEN_SHARED_INFO)) - -#ifdef CONFIG_XEN_GUEST - -extern bool xen_guest; -extern bool pv_console; -extern uint32_t xen_cpuid_base; - -const struct hypervisor_ops *xg_probe(void); -int xg_alloc_unused_page(mfn_t *mfn); -int xg_free_unused_page(mfn_t mfn); - -DECLARE_PER_CPU(unsigned int, vcpu_id); -DECLARE_PER_CPU(struct vcpu_info *, vcpu_info); - -#else - -#define xen_guest 0 -#define pv_console 0 - -static inline const struct hypervisor_ops *xg_probe(void) { return NULL; } - -#endif /* CONFIG_XEN_GUEST */ -#endif /* __X86_GUEST_XEN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h deleted file mode 100644 index dbf789fa58..0000000000 --- a/xen/include/asm-x86/guest_access.h +++ /dev/null @@ -1,59 +0,0 @@ -/****************************************************************************** - * guest_access.h - * - * Copyright (c) 2006, K A Fraser - */ - -#ifndef __ASM_X86_GUEST_ACCESS_H__ -#define __ASM_X86_GUEST_ACCESS_H__ - -#include -#include -#include -#include - -/* Raw access functions: no type checking. */ -#define raw_copy_to_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ - copy_to_user_hvm((dst), (src), (len)) : \ - copy_to_guest_pv(dst, src, len)) -#define raw_copy_from_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? 
\ - copy_from_user_hvm((dst), (src), (len)) : \ - copy_from_guest_pv(dst, src, len)) -#define raw_clear_guest(dst, len) \ - (is_hvm_vcpu(current) ? \ - clear_user_hvm((dst), (len)) : \ - clear_guest_pv(dst, len)) -#define __raw_copy_to_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ - copy_to_user_hvm((dst), (src), (len)) : \ - __copy_to_guest_pv(dst, src, len)) -#define __raw_copy_from_guest(dst, src, len) \ - (is_hvm_vcpu(current) ? \ - copy_from_user_hvm((dst), (src), (len)) : \ - __copy_from_guest_pv(dst, src, len)) - -/* - * Pre-validate a guest handle. - * Allows use of faster __copy_* functions. - */ -#define guest_handle_okay(hnd, nr) \ - (paging_mode_external(current->domain) || \ - array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))) -#define guest_handle_subrange_okay(hnd, first, last) \ - (paging_mode_external(current->domain) || \ - array_access_ok((hnd).p + (first), \ - (last)-(first)+1, \ - sizeof(*(hnd).p))) - -#endif /* __ASM_X86_GUEST_ACCESS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest_atomics.h b/xen/include/asm-x86/guest_atomics.h deleted file mode 100644 index c2dec0d650..0000000000 --- a/xen/include/asm-x86/guest_atomics.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _X86_GUEST_ATOMICS_H -#define _X86_GUEST_ATOMICS_H - -#include - -/* - * It is safe to use the atomics helpers on x86 on memory shared with - * the guests. - */ -#define guest_set_bit(d, nr, p) ((void)(d), set_bit(nr, p)) -#define guest_clear_bit(d, nr, p) ((void)(d), clear_bit(nr, p)) -#define guest_change_bit(d, nr, p) ((void)(d), change_bit(nr, p)) -#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p)) - -#define guest_test_and_set_bit(d, nr, p) \ - ((void)(d), test_and_set_bit(nr, p)) -#define guest_test_and_clear_bit(d, nr, p) \ - ((void)(d), test_and_clear_bit(nr, p)) -#define guest_test_and_change_bit(d, nr, p) \ - ((void)(d), test_and_change_bit(nr, p)) - -#define guest_cmpxchg(d, ptr, o, n) ((void)(d), cmpxchg(ptr, o, n)) -#define guest_cmpxchg64 guest_cmpxchg - -#endif /* _X86_GUEST_ATOMICS_H */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h deleted file mode 100644 index 6647ccfb85..0000000000 --- a/xen/include/asm-x86/guest_pt.h +++ /dev/null @@ -1,468 +0,0 @@ -/****************************************************************************** - * xen/asm-x86/guest_pt.h - * - * Types and accessors for guest pagetable entries, as distinct from - * Xen's pagetable types. - * - * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including - * this file. - * - * Parts of this code are Copyright (c) 2006 by XenSource Inc. - * Parts of this code are Copyright (c) 2006 by Michael A Fetterman - * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ - -#ifndef _XEN_ASM_GUEST_PT_H -#define _XEN_ASM_GUEST_PT_H - -#if !defined(GUEST_PAGING_LEVELS) -#error GUEST_PAGING_LEVELS not defined -#endif - -static inline paddr_t -gfn_to_paddr(gfn_t gfn) -{ - return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT; -} - -/* Override get_gfn to work with gfn_t */ -#undef get_gfn -#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC) - -/* Mask covering the reserved bits from superpage alignment. */ -#define SUPERPAGE_RSVD(bit) \ - (((1ul << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1ul))) - -static inline uint32_t fold_pse36(uint64_t val) -{ - return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 32)) >> (32 - 13)); -} -static inline uint64_t unfold_pse36(uint32_t val) -{ - return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 13)) << (32 - 13)); -} - -/* Types of the guest's page tables and access functions for them */ - -#if GUEST_PAGING_LEVELS == 2 - -#define GUEST_L1_PAGETABLE_ENTRIES 1024 -#define GUEST_L2_PAGETABLE_ENTRIES 1024 - -#define GUEST_L1_PAGETABLE_SHIFT 12 -#define GUEST_L2_PAGETABLE_SHIFT 22 - -#define GUEST_L1_PAGETABLE_RSVD 0 -#define GUEST_L2_PAGETABLE_RSVD 0 - -typedef uint32_t guest_intpte_t; -typedef struct { guest_intpte_t l1; } guest_l1e_t; -typedef struct { guest_intpte_t l2; } guest_l2e_t; - -#define PRI_gpte "08x" - -static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e) -{ return _gfn(gl1e.l1 >> PAGE_SHIFT); } -static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e) -{ return _gfn(gl2e.l2 >> PAGE_SHIFT); } - -static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e) -{ return gl1e.l1 & 0xfff; } -static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e) -{ return gl2e.l2 & 0xfff; } - -static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e) -{ return 0; } -static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e) -{ return 0; } - -static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags) -{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; } -static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags) -{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; } - -#define guest_l1_table_offset(_va) \ - (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1)) -#define guest_l2_table_offset(_va) \ - (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1)) - -#else /* GUEST_PAGING_LEVELS != 2 */ - -#if GUEST_PAGING_LEVELS == 3 - -#define GUEST_L1_PAGETABLE_ENTRIES 512 -#define GUEST_L2_PAGETABLE_ENTRIES 512 -#define GUEST_L3_PAGETABLE_ENTRIES 4 - -#define GUEST_L1_PAGETABLE_SHIFT 12 -#define GUEST_L2_PAGETABLE_SHIFT 21 -#define GUEST_L3_PAGETABLE_SHIFT 30 - -#define GUEST_L1_PAGETABLE_RSVD 0x7ff0000000000000ul -#define GUEST_L2_PAGETABLE_RSVD 0x7ff0000000000000ul -#define GUEST_L3_PAGETABLE_RSVD \ - (0xfff0000000000000ul | _PAGE_GLOBAL | _PAGE_PSE | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW) - -#else /* GUEST_PAGING_LEVELS == 4 */ - -#define GUEST_L1_PAGETABLE_ENTRIES 512 -#define GUEST_L2_PAGETABLE_ENTRIES 512 -#define GUEST_L3_PAGETABLE_ENTRIES 512 -#define GUEST_L4_PAGETABLE_ENTRIES 512 - -#define GUEST_L1_PAGETABLE_SHIFT 12 -#define GUEST_L2_PAGETABLE_SHIFT 21 -#define GUEST_L3_PAGETABLE_SHIFT 30 -#define GUEST_L4_PAGETABLE_SHIFT 39 - -#define GUEST_L1_PAGETABLE_RSVD 0 -#define GUEST_L2_PAGETABLE_RSVD 0 -#define GUEST_L3_PAGETABLE_RSVD 0 -/* NB L4e._PAGE_GLOBAL is reserved for AMD, but ignored for Intel. 
*/ -#define GUEST_L4_PAGETABLE_RSVD _PAGE_PSE - -#endif - -typedef l1_pgentry_t guest_l1e_t; -typedef l2_pgentry_t guest_l2e_t; -typedef l3_pgentry_t guest_l3e_t; -#if GUEST_PAGING_LEVELS >= 4 -typedef l4_pgentry_t guest_l4e_t; -#endif -typedef intpte_t guest_intpte_t; - -#define PRI_gpte "016"PRIx64 - -static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e) -{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); } -static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e) -{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); } -static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e) -{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); } -#if GUEST_PAGING_LEVELS >= 4 -static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e) -{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); } -#endif - -static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e) -{ return l1e_get_flags(gl1e); } -static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e) -{ return l2e_get_flags(gl2e); } -static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e) -{ return l3e_get_flags(gl3e); } -#if GUEST_PAGING_LEVELS >= 4 -static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e) -{ return l4e_get_flags(gl4e); } -#endif - -static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e) -{ return l1e_get_pkey(gl1e); } -static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e) -{ return l2e_get_pkey(gl2e); } -static inline u32 guest_l3e_get_pkey(guest_l3e_t gl3e) -{ return l3e_get_pkey(gl3e); } - -static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags) -{ return l1e_from_pfn(gfn_x(gfn), flags); } -static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags) -{ return l2e_from_pfn(gfn_x(gfn), flags); } -static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags) -{ return l3e_from_pfn(gfn_x(gfn), flags); } -#if GUEST_PAGING_LEVELS >= 4 -static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags) -{ return l4e_from_pfn(gfn_x(gfn), flags); } -#endif - -#define guest_l1_table_offset(a) l1_table_offset(a) -#define guest_l2_table_offset(a) l2_table_offset(a) -#define guest_l3_table_offset(a) l3_table_offset(a) -#define guest_l4_table_offset(a) l4_table_offset(a) - -#endif /* GUEST_PAGING_LEVELS != 2 */ - -/* Mask of the GFNs covered by an L2 or L3 superpage */ -#define GUEST_L2_GFN_MASK (GUEST_L1_PAGETABLE_ENTRIES - 1) -#define GUEST_L3_GFN_MASK \ - ((GUEST_L2_PAGETABLE_ENTRIES * GUEST_L1_PAGETABLE_ENTRIES) - 1) - - -/* Which pagetable features are supported on this vcpu? */ - -static always_inline bool guest_can_use_l2_superpages(const struct vcpu *v) -{ - /* - * PV guests use Xen's paging settings. Being 4-level, 2M - * superpages are unconditionally supported. - * - * The L2 _PAGE_PSE bit must be honoured in HVM guests, whenever - * CR4.PSE is set or the guest is in PAE or long mode. - * It's also used in the dummy PT for vcpus with CR0.PG cleared. - */ - return (is_pv_vcpu(v) || - GUEST_PAGING_LEVELS != 2 || - !hvm_paging_enabled(v) || - (v->arch.hvm.guest_cr[4] & X86_CR4_PSE)); -} - -static always_inline bool guest_can_use_l3_superpages(const struct domain *d) -{ - /* - * There are no control register settings for the hardware pagewalk on the - * subject of 1G superpages. - * - * Shadow pagetables don't support 1GB superpages at all, and will always - * treat L3 _PAGE_PSE as reserved. - * - * With HAP however, if the guest constructs a 1GB superpage on capable - * hardware, it will function irrespective of whether the feature is - * advertised. Xen's model of performing a pagewalk should match. 
- */ - return GUEST_PAGING_LEVELS >= 4 && paging_mode_hap(d) && cpu_has_page1gb; -} - -static inline bool guest_can_use_pse36(const struct domain *d) -{ - /* - * Only called in the context of 2-level guests, after - * guest_can_use_l2_superpages() has indicated true. - * - * Shadow pagetables don't support PSE36 superpages at all, and will - * always treat them as reserved. - * - * With HAP however, once L2 superpages are active, there are no control - * register settings for the hardware pagewalk on the subject of PSE36. - * If the guest constructs a PSE36 superpage on capable hardware, it will - * function irrespective of whether the feature is advertised. Xen's - * model of performing a pagewalk should match. - */ - return paging_mode_hap(d) && cpu_has_pse36; -} - -static always_inline bool guest_nx_enabled(const struct vcpu *v) -{ - if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect without CR4.PAE. */ - return false; - - /* PV guests can't control EFER.NX, and inherit Xen's choice. */ - return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v); -} - -static always_inline bool guest_wp_enabled(const struct vcpu *v) -{ - /* PV guests can't control CR0.WP, and it is unconditionally set by Xen. */ - return is_pv_vcpu(v) || hvm_wp_enabled(v); -} - -static always_inline bool guest_smep_enabled(const struct vcpu *v) -{ - return !is_pv_vcpu(v) && hvm_smep_enabled(v); -} - -static always_inline bool guest_smap_enabled(const struct vcpu *v) -{ - return !is_pv_vcpu(v) && hvm_smap_enabled(v); -} - -static always_inline bool guest_pku_enabled(const struct vcpu *v) -{ - return !is_pv_vcpu(v) && hvm_pku_enabled(v); -} - -/* Helpers for identifying whether guest entries have reserved bits set. */ - -/* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */ -static always_inline uint64_t guest_rsvd_bits(const struct vcpu *v) -{ - return ((PADDR_MASK & - ~((1ul << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) | - (guest_nx_enabled(v) ? 0 : put_pte_flags(_PAGE_NX_BIT))); -} - -static always_inline bool guest_l1e_rsvd_bits(const struct vcpu *v, - guest_l1e_t l1e) -{ - return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD); -} - -static always_inline bool guest_l2e_rsvd_bits(const struct vcpu *v, - guest_l2e_t l2e) -{ - uint64_t rsvd_bits = guest_rsvd_bits(v); - - return ((l2e.l2 & (rsvd_bits | GUEST_L2_PAGETABLE_RSVD | - (guest_can_use_l2_superpages(v) ? 0 : _PAGE_PSE))) || - ((l2e.l2 & _PAGE_PSE) && - (l2e.l2 & ((GUEST_PAGING_LEVELS == 2 && guest_can_use_pse36(v->domain)) - /* PSE36 tops out at 40 bits of address width. */ - ? (fold_pse36(rsvd_bits | (1ul << 40))) - : SUPERPAGE_RSVD(GUEST_L2_PAGETABLE_SHIFT))))); -} - -#if GUEST_PAGING_LEVELS >= 3 -static always_inline bool guest_l3e_rsvd_bits(const struct vcpu *v, - guest_l3e_t l3e) -{ - return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD | - (guest_can_use_l3_superpages(v->domain) ? 0 : _PAGE_PSE))) || - ((l3e.l3 & _PAGE_PSE) && - (l3e.l3 & SUPERPAGE_RSVD(GUEST_L3_PAGETABLE_SHIFT)))); -} - -#if GUEST_PAGING_LEVELS >= 4 -static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v, - guest_l4e_t l4e) -{ - return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD | - ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD) - ? _PAGE_GLOBAL : 0)); -} -#endif /* GUEST_PAGING_LEVELS >= 4 */ -#endif /* GUEST_PAGING_LEVELS >= 3 */ -
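The PSE36 helpers defined earlier in this file, fold_pse36() and unfold_pse36(), move physical-address bits 32-40 of a 64-bit entry into bits 13-21 of a 32-bit PDE and back again; the L2 reserved-bit check above relies on folding a 64-bit reserved mask into that 32-bit layout. A minimal, self-contained sketch of the round trip (not part of this patch; the PDE value is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Standalone copies of the helpers above, for a quick host-side test. */
    static uint32_t fold_pse36(uint64_t val)
    {
        return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 32)) >> (32 - 13));
    }
    static uint64_t unfold_pse36(uint32_t val)
    {
        return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 13)) << (32 - 13));
    }

    int main(void)
    {
        /* A 4MB PSE36 superpage PDE: bits 22-31 hold the low address part,
         * PDE bits 13-14 here encode address bits 32-33, 0x83 is
         * Present|RW|PSE. */
        uint32_t pde = 0x00400000 | (0x3u << 13) | 0x83;

        assert(fold_pse36(unfold_pse36(pde)) == pde);
        return 0;
    }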
/* Type used for recording a walk through guest pagetables. It is - * filled in by the pagetable walk function, and also used as a cache - * for later walks. When we encounter a superpage l2e, we fabricate an - * l1e for propagation to the shadow (for splintering guest superpages - * into many shadow l1 entries). */ -typedef struct guest_pagetable_walk walk_t; -struct guest_pagetable_walk -{ - unsigned long va; /* Address we were looking for */ -#if GUEST_PAGING_LEVELS >= 3 -#if GUEST_PAGING_LEVELS >= 4 - guest_l4e_t l4e; /* Guest's level 4 entry */ -#endif - guest_l3e_t l3e; /* Guest's level 3 entry */ -#endif - guest_l2e_t l2e; /* Guest's level 2 entry */ - union - { - guest_l1e_t l1e; /* Guest's level 1 entry (or fabrication). */ - uint64_t el1e; /* L2 PSE36 superpages wider than 32 bits. */ - }; -#if GUEST_PAGING_LEVELS >= 4 - mfn_t l4mfn; /* MFN that the level 4 entry was in */ - mfn_t l3mfn; /* MFN that the level 3 entry was in */ -#endif - mfn_t l2mfn; /* MFN that the level 2 entry was in */ - mfn_t l1mfn; /* MFN that the level 1 entry was in */ - - uint32_t pfec; /* Accumulated PFEC_* error code from walk. */ -}; - -/* Given a walk_t, translate the gw->va into the guest's notion of the - * corresponding frame number. */ -static inline gfn_t guest_walk_to_gfn(const walk_t *gw) -{ - if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) ) - return INVALID_GFN; - return (GUEST_PAGING_LEVELS == 2 - ? _gfn(gw->el1e >> PAGE_SHIFT) - : guest_l1e_get_gfn(gw->l1e)); -} - -/* Given a walk_t, translate the gw->va into the guest's notion of the - * corresponding physical address. */ -static inline paddr_t guest_walk_to_gpa(const walk_t *gw) -{ - gfn_t gfn = guest_walk_to_gfn(gw); - - if ( gfn_eq(gfn, INVALID_GFN) ) - return INVALID_PADDR; - - return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK); -} - -/* Given a walk_t from a successful walk, return the page-order of the - * page or superpage that the virtual address is in. */ -static inline unsigned int guest_walk_to_page_order(const walk_t *gw) -{ - /* This is only valid for successful walks - otherwise the - * PSE bits might be invalid. */ - ASSERT(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT); -#if GUEST_PAGING_LEVELS >= 3 - if ( guest_l3e_get_flags(gw->l3e) & _PAGE_PSE ) - return GUEST_L3_PAGETABLE_SHIFT - PAGE_SHIFT; -#endif - if ( guest_l2e_get_flags(gw->l2e) & _PAGE_PSE ) - return GUEST_L2_PAGETABLE_SHIFT - PAGE_SHIFT; - return GUEST_L1_PAGETABLE_SHIFT - PAGE_SHIFT; -} - - -/* - * Walk the guest pagetables, after the manner of a hardware walker. - * - * Inputs: a vcpu, a virtual address, a walk_t to fill, a - * pointer to a pagefault code, the MFN of the guest's - * top-level pagetable, and a mapping of the - * guest's top-level pagetable. - * - * We walk the vcpu's guest pagetables, filling the walk_t with what we - * see and adding any Accessed and Dirty bits that are needed in the - * guest entries. Using the pagefault code, we check the permissions as - * we go. For the purposes of reading pagetables we treat all non-RAM - * memory as containing zeroes. - * - * Returns a boolean indicating success or failure. walk_t.pfec contains - * the accumulated error code on failure. - */ - -/* Macro-fu so you can call guest_walk_tables() and get the right one.
*/ -#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels -#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l) -#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS) - -bool -guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m, - unsigned long va, walk_t *gw, uint32_t pfec, - gfn_t top_gfn, mfn_t top_mfn, void *top_map); - -/* Pretty-print the contents of a guest-walk */ -static inline void print_gw(const walk_t *gw) -{ - gprintk(XENLOG_INFO, "GUEST WALK TO %p\n", _p(gw->va)); -#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */ -#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */ - gprintk(XENLOG_INFO, " l4e=%" PRI_gpte " l4mfn=%" PRI_mfn "\n", - gw->l4e.l4, mfn_x(gw->l4mfn)); - gprintk(XENLOG_INFO, " l3e=%" PRI_gpte " l3mfn=%" PRI_mfn "\n", - gw->l3e.l3, mfn_x(gw->l3mfn)); -#else /* PAE only... */ - gprintk(XENLOG_INFO, " l3e=%" PRI_gpte "\n", gw->l3e.l3); -#endif /* PAE or 64... */ -#endif /* All levels... */ - gprintk(XENLOG_INFO, " l2e=%" PRI_gpte " l2mfn=%" PRI_mfn "\n", - gw->l2e.l2, mfn_x(gw->l2mfn)); -#if GUEST_PAGING_LEVELS == 2 - gprintk(XENLOG_INFO, " el1e=%08" PRIx64 " l1mfn=%" PRI_mfn "\n", - gw->el1e, mfn_x(gw->l1mfn)); -#else - gprintk(XENLOG_INFO, " l1e=%" PRI_gpte " l1mfn=%" PRI_mfn "\n", - gw->l1e.l1, mfn_x(gw->l1mfn)); -#endif - gprintk(XENLOG_INFO, " pfec=%02x[%c%c%c%c%c%c]\n", gw->pfec, - gw->pfec & PFEC_prot_key ? 'K' : '-', - gw->pfec & PFEC_insn_fetch ? 'I' : 'd', - gw->pfec & PFEC_reserved_bit ? 'R' : '-', - gw->pfec & PFEC_user_mode ? 'U' : 's', - gw->pfec & PFEC_write_access ? 'W' : 'r', - gw->pfec & PFEC_page_present ? 'P' : '-' - ); -} - -#endif /* _XEN_ASM_GUEST_PT_H */ diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h deleted file mode 100644 index 90dece29de..0000000000 --- a/xen/include/asm-x86/hap.h +++ /dev/null @@ -1,60 +0,0 @@ -/****************************************************************************** - * include/asm-x86/hap.h - * - * hardware-assisted paging - * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) - * - * Parts of this code are Copyright (c) 2006 by XenSource Inc. - * Parts of this code are Copyright (c) 2006 by Michael A Fetterman - * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ - -#ifndef _XEN_HAP_H -#define _XEN_HAP_H - -#define HAP_PRINTK(_f, _a...) 
\ - debugtrace_printk("hap: %s(): " _f, __func__, ##_a) - -/************************************************/ -/* hap domain level functions */ -/************************************************/ -void hap_domain_init(struct domain *d); -int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); -int hap_enable(struct domain *d, u32 mode); -void hap_final_teardown(struct domain *d); -void hap_vcpu_teardown(struct vcpu *v); -void hap_teardown(struct domain *d, bool *preempted); -void hap_vcpu_init(struct vcpu *v); -int hap_track_dirty_vram(struct domain *d, - unsigned long begin_pfn, - unsigned int nr_frames, - XEN_GUEST_HANDLE(void) dirty_bitmap); - -extern const struct paging_mode *hap_paging_get_mode(struct vcpu *); -int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted); -unsigned int hap_get_allocation(struct domain *d); - -#endif /* XEN_HAP_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hardirq.h b/xen/include/asm-x86/hardirq.h deleted file mode 100644 index 276e3419d7..0000000000 --- a/xen/include/asm-x86/hardirq.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef __ASM_HARDIRQ_H -#define __ASM_HARDIRQ_H - -#include -#include - -typedef struct { - unsigned int __softirq_pending; - unsigned int __local_irq_count; - unsigned int nmi_count; - unsigned int mce_count; - bool_t __mwait_wakeup; -} __cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -#define in_irq() (local_irq_count(smp_processor_id()) != 0) - -#define irq_enter() (local_irq_count(smp_processor_id())++) -#define irq_exit() (local_irq_count(smp_processor_id())--) - -#define nmi_count(cpu) __IRQ_STAT(cpu, nmi_count) -#define in_nmi_handler() (nmi_count(smp_processor_id()) != 0) -#define nmi_enter() (nmi_count(smp_processor_id())++) -#define nmi_exit() (nmi_count(smp_processor_id())--) - -#define mce_count(cpu) __IRQ_STAT(cpu, mce_count) -#define in_mce_handler() (mce_count(smp_processor_id()) != 0) -#define mce_enter() (mce_count(smp_processor_id())++) -#define mce_exit() (mce_count(smp_processor_id())--) - -void ack_bad_irq(unsigned int irq); - -extern void apic_intr_init(void); -extern void smp_intr_init(void); - -#endif /* __ASM_HARDIRQ_H */ diff --git a/xen/include/asm-x86/hpet.h b/xen/include/asm-x86/hpet.h deleted file mode 100644 index 8f9725a95e..0000000000 --- a/xen/include/asm-x86/hpet.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef __X86_HPET_H__ -#define __X86_HPET_H__ - -/* - * Documentation on HPET can be found at: - * http://www.intel.com/content/dam/www/public/us/en/documents/ - * technical-specifications/software-developers-hpet-spec-1-0a.pdf - */ - -#define HPET_MMAP_SIZE 1024 - -#define HPET_ID 0x000 -#define HPET_PERIOD 0x004 -#define HPET_CFG 0x010 -#define HPET_STATUS 0x020 -#define HPET_COUNTER 0x0f0 -#define HPET_Tn_CFG(n) (0x100 + (n) * 0x20) -#define HPET_Tn_CMP(n) (0x108 + (n) * 0x20) -#define HPET_Tn_ROUTE(n) (0x110 + (n) * 0x20) - -#define HPET_ID_VENDOR 0xffff0000 -#define HPET_ID_LEGSUP 0x00008000 -#define HPET_ID_64BIT 0x00002000 -#define HPET_ID_NUMBER 0x00001f00 -#define HPET_ID_REV 0x000000ff -#define HPET_ID_NUMBER_SHIFT 8 -#define HPET_ID_VENDOR_SHIFT 16 - -#define HPET_CFG_ENABLE 0x001 -#define HPET_CFG_LEGACY 0x002 -#define HPET_LEGACY_8254 2 -#define HPET_LEGACY_RTC 8 - -#define HPET_TN_LEVEL 0x002 -#define HPET_TN_ENABLE 0x004 -#define HPET_TN_PERIODIC 0x008 
-#define HPET_TN_PERIODIC_CAP 0x010 -#define HPET_TN_64BIT_CAP 0x020 -#define HPET_TN_SETVAL 0x040 -#define HPET_TN_32BIT 0x100 -#define HPET_TN_ROUTE 0x3e00 -#define HPET_TN_FSB 0x4000 -#define HPET_TN_FSB_CAP 0x8000 -#define HPET_TN_RESERVED 0xffff0081 -#define HPET_TN_INT_ROUTE_CAP (0xffffffffULL << 32) - - -#define hpet_read32(x) \ - (*(volatile u32 *)(fix_to_virt(FIX_HPET_BASE) + (x))) -#define hpet_write32(y,x) \ - (*(volatile u32 *)(fix_to_virt(FIX_HPET_BASE) + (x)) = (y)) - -extern unsigned long hpet_address; -extern u8 hpet_blockid; -extern u8 hpet_flags; -extern int8_t opt_hpet_legacy_replacement; - -/* - * Detect and initialise HPET hardware: return counter update frequency. - * Return value is zero if HPET is unavailable. - */ -u64 hpet_setup(void); -void hpet_resume(u32 *); - -/* - * Disable HPET hardware: restore it to boot time state. - */ -void hpet_disable(void); - -/* - * Callback from legacy timer (PIT channel 0) IRQ handler. - * Returns 1 if tick originated from HPET; else 0. - */ -int hpet_legacy_irq_tick(void); - -/* - * Try to enable HPET Legacy Replacement mode. Returns a boolean indicating - * whether the HPET configuration was changed. - */ -bool hpet_enable_legacy_replacement_mode(void); - -/* - * Undo the effects of hpet_enable_legacy_replacement_mode(). Must not be - * called unless enable() returned true. - */ -void hpet_disable_legacy_replacement_mode(void); - -/* - * Temporarily use an HPET event counter for timer interrupt handling, - * rather than using the LAPIC timer. Used for Cx state entry. - */ -void hpet_broadcast_init(void); -void hpet_broadcast_resume(void); -void hpet_broadcast_enter(void); -void hpet_broadcast_exit(void); -int hpet_broadcast_is_available(void); -void hpet_disable_legacy_broadcast(void); - -extern void (*pv_rtc_handler)(uint8_t reg, uint8_t value); - -#endif /* __X86_HPET_H__ */ diff --git a/xen/include/asm-x86/hvm/asid.h b/xen/include/asm-x86/hvm/asid.h deleted file mode 100644 index 633ddb72e4..0000000000 --- a/xen/include/asm-x86/hvm/asid.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * asid.h: ASID management - * Copyright (c) 2007, Advanced Micro Devices, Inc. - * Copyright (c) 2009, Citrix Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_ASID_H__ -#define __ASM_X86_HVM_ASID_H__ - - -struct vcpu; -struct hvm_vcpu_asid; - -/* Initialise ASID management for the current physical CPU. */ -void hvm_asid_init(int nasids); - -/* Invalidate a particular ASID allocation: forces re-allocation. */ -void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid); - -/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */ -void hvm_asid_flush_vcpu(struct vcpu *v); - -/* Flush all ASIDs on this processor core. */ -void hvm_asid_flush_core(void); - -/* Called before entry to guest context. Checks ASID allocation, returns a - * boolean indicating whether all ASIDs must be flushed. */ -bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid); -
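To illustrate the contract just stated, a VM-entry path tests the return value and performs a full flush when a new ASID generation has begun. A hedged sketch, not code from this patch: the n1asid field is assumed to match struct hvm_vcpu's ASID member, and vendor_tlb_flush() is a hypothetical stand-in for the vendor-specific flush primitive.

    /* Sketch: honour hvm_asid_handle_vmenter()'s return value at VM entry. */
    static void vmentry_asid_check(struct vcpu *curr)
    {
        if ( hvm_asid_handle_vmenter(&curr->arch.hvm.n1asid) )
            vendor_tlb_flush();  /* new ASID generation: flush stale entries */
    }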
-#endif /* __ASM_X86_HVM_ASID_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/cacheattr.h b/xen/include/asm-x86/hvm/cacheattr.h deleted file mode 100644 index 79e721d074..0000000000 --- a/xen/include/asm-x86/hvm/cacheattr.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __HVM_CACHEATTR_H__ -#define __HVM_CACHEATTR_H__ - -#include - -struct domain; -void hvm_init_cacheattr_region_list(struct domain *d); -void hvm_destroy_cacheattr_region_list(struct domain *d); - -/* - * Check whether gfn is in the pinned range: - * if yes, return the (non-negative) type - * if no or ambiguous, return a negative error code - */ -int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn, - unsigned int order); - - -/* Set pinned caching type for a domain. */ -int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start, - uint64_t gfn_end, uint32_t type); - -#endif /* __HVM_CACHEATTR_H__ */ diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h deleted file mode 100644 index 698455444e..0000000000 --- a/xen/include/asm-x86/hvm/domain.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * domain.h: HVM per domain definitions - * - * Copyright (c) 2004, Intel Corporation. - * Copyright (c) 2005, International Business Machines Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_DOMAIN_H__ -#define __ASM_X86_HVM_DOMAIN_H__ - -#include -#include -#include - -#include -#include -#include - -#ifdef CONFIG_MEM_SHARING -struct mem_sharing_domain -{ - bool enabled, block_interrupts; - - /* - * When releasing shared gfns in a preemptible manner, recall where - * to resume the search. - */ - unsigned long next_shared_gfn_to_relinquish; -}; -#endif - -/* - * This structure defines function hooks to support hardware-assisted - * virtual interrupt delivery to guests (e.g. VMX PI and SVM AVIC). - * - * These hooks are defined by the underlying arch-specific code - * as needed. For example: - * - When the domain is enabled with virtual IPI delivery - * - When the domain is enabled with virtual I/O int delivery - * and actually has a physical device assigned. - */ -struct hvm_pi_ops { - unsigned int flags; - - /* - * Hook into arch_vcpu_block(), which is called - * from vcpu_block() and vcpu_do_poll(). - */ - void (*vcpu_block)(struct vcpu *); -}; - -struct hvm_domain { - /* Guest page range used for non-default ioreq servers */ - struct { - unsigned long base; - unsigned long mask; /* indexed by GFN minus base */ - unsigned long legacy_mask; /* indexed by HVM param number */ - } ioreq_gfn; - - /* Cached CF8 for guest PCI config cycles */ - uint32_t pci_cf8; - - struct pl_time *pl_time; - - struct hvm_io_handler *io_handler; - unsigned int io_handler_count; - - /* Lock protects access to irq, vpic and vioapic.
*/ - spinlock_t irq_lock; - struct hvm_irq *irq; - struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */ - struct hvm_vioapic **vioapic; - unsigned int nr_vioapics; - struct hvm_hw_stdvga stdvga; - - /* - * hvm_hw_pmtimer is a publicly-visible name. We will defer renaming - * it to the more appropriate hvm_hw_acpi until the expected - * comprehensive rewrite of migration code, thus avoiding code churn - * in public header files. - * Internally, however, we will be using hvm_hw_acpi. - */ -#define hvm_hw_acpi hvm_hw_pmtimer - struct hvm_hw_acpi acpi; - - /* VCPU which is the current target for 8259 interrupts. */ - struct vcpu *i8259_target; - - /* emulated irq to pirq */ - struct radix_tree_root emuirq_pirq; - - uint64_t *params; - - /* Memory ranges with pinned cache attributes. */ - struct list_head pinned_cacheattr_ranges; - - /* VRAM dirty support. Protect with the domain paging lock. */ - struct sh_dirty_vram *dirty_vram; - - /* If one of this domain's vcpus is in no_fill_mode, or if mtrr/pat - * differs between vcpus, set is_in_uc_mode - */ - spinlock_t uc_lock; - bool is_in_uc_mode; - - bool is_s3_suspended; - - /* hypervisor intercepted msix table */ - struct list_head msixtbl_list; - - struct viridian_domain *viridian; - - /* - * TSC value that VCPUs use to calculate their tsc_offset value. - * Used during initialization and save/restore. - */ - uint64_t sync_tsc; - - uint64_t tsc_scaling_ratio; - - unsigned long *io_bitmap; - - /* List of guest to machine IO ports mapping. */ - struct list_head g2m_ioport_list; - - /* List of MMCFG regions trapped by Xen. */ - struct list_head mmcfg_regions; - rwlock_t mmcfg_lock; - - /* List of MSI-X tables. */ - struct list_head msix_tables; - - /* List of permanently write-mapped pages. */ - struct { - spinlock_t lock; - struct list_head list; - } write_map; - - struct hvm_pi_ops pi_ops; - - union { - struct vmx_domain vmx; - struct svm_domain svm; - }; - -#ifdef CONFIG_MEM_SHARING - struct mem_sharing_domain mem_sharing; -#endif -}; - -#endif /* __ASM_X86_HVM_DOMAIN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h deleted file mode 100644 index e670040603..0000000000 --- a/xen/include/asm-x86/hvm/emulate.h +++ /dev/null @@ -1,156 +0,0 @@ -/****************************************************************************** - * hvm/emulate.h - * - * HVM instruction emulation. Used for MMIO and VMX real mode. - * - * Copyright (c) 2008 Citrix Systems, Inc. - * - * Authors: - * Keir Fraser - */ - -#ifndef __ASM_X86_HVM_EMULATE_H__ -#define __ASM_X86_HVM_EMULATE_H__ - -#include -#include -#include -#include -#include - -typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state, - const struct x86_emulate_ctxt *ctxt); - -struct hvm_emulate_ctxt { - struct x86_emulate_ctxt ctxt; - - /* - * validate: Post-decode, pre-emulate hook to allow caller controlled - * filtering. - */ - hvm_emulate_validate_t *validate; - - /* Cache of 16 bytes of instruction. */ - uint8_t insn_buf[16]; - unsigned long insn_buf_eip; - unsigned int insn_buf_bytes; - - struct segment_register seg_reg[10]; - unsigned long seg_reg_accessed; - unsigned long seg_reg_dirty; - - /* - * MFNs behind temporary mappings in the write callback. The length is - * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are - * needed. - */ - mfn_t mfn[2]; - - uint32_t intr_shadow; - - bool is_mem_access; - - bool_t set_context; -}; -
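The emulation entry points declared below follow a fixed calling sequence: hvm_emulate_init_once() once per context, hvm_emulate_init_per_insn() before each instruction, then hvm_emulate_one(). A minimal sketch of that sequence (error handling elided; VIO_no_completion is assumed to be the enum vio_completion value for "nothing pending", and a NULL/0 instruction buffer asks for the bytes to be fetched from the guest):

    /* Sketch: emulate the instruction at the guest's current rIP. */
    static int emulate_current_insn(struct cpu_user_regs *regs)
    {
        struct hvm_emulate_ctxt ctxt;

        hvm_emulate_init_once(&ctxt, NULL /* no validate hook */, regs);
        hvm_emulate_init_per_insn(&ctxt, NULL, 0 /* fetch from guest */);

        return hvm_emulate_one(&ctxt, VIO_no_completion);
    }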
-enum emul_kind { - EMUL_KIND_NORMAL, - EMUL_KIND_NOWRITE, - EMUL_KIND_SET_CONTEXT_DATA, - EMUL_KIND_SET_CONTEXT_INSN -}; - -bool __nonnull(1, 2) hvm_emulate_one_insn( - hvm_emulate_validate_t *validate, - const char *descr); -int hvm_emulate_one( - struct hvm_emulate_ctxt *hvmemul_ctxt, - enum vio_completion completion); -void hvm_emulate_one_vm_event(enum emul_kind kind, - unsigned int trapnr, - unsigned int errcode); -/* Must be called once to set up hvmemul state. */ -void hvm_emulate_init_once( - struct hvm_emulate_ctxt *hvmemul_ctxt, - hvm_emulate_validate_t *validate, - struct cpu_user_regs *regs); -/* Must be called once before each instruction emulated. */ -void hvm_emulate_init_per_insn( - struct hvm_emulate_ctxt *hvmemul_ctxt, - const unsigned char *insn_buf, - unsigned int insn_bytes); -void hvm_emulate_writeback( - struct hvm_emulate_ctxt *hvmemul_ctxt); -void hvmemul_cancel(struct vcpu *v); -struct segment_register *hvmemul_get_seg_reg( - enum x86_segment seg, - struct hvm_emulate_ctxt *hvmemul_ctxt); -int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla); - -static inline bool handle_mmio(void) -{ - return hvm_emulate_one_insn(x86_insn_is_mem_access, "MMIO"); -} - -int hvmemul_insn_fetch(unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt); -int hvmemul_do_pio_buffer(uint16_t port, - unsigned int size, - uint8_t dir, - void *buffer); - -#ifdef CONFIG_HVM -/* - * The cache controlled by the functions below is not like an ordinary CPU - * cache, i.e. aiming to help performance, but a "secret store" which is - * needed for correctness. The issue it helps to address is the need for - * re-execution of an insn (after data was provided by a device model) to - * observe the exact same memory state, i.e. to specifically not observe any - * updates which may have occurred in the meantime by other agents. - * Therefore this cache gets - * - enabled when emulation of an insn starts, - * - disabled across processing secondary things like a hypercall resulting - * from insn emulation, - * - disabled again when an emulated insn is known to not require any - * further re-execution.
- */ -int __must_check hvmemul_cache_init(struct vcpu *v); -static inline void hvmemul_cache_destroy(struct vcpu *v) -{ - XFREE(v->arch.hvm.hvm_io.cache); -} -bool hvmemul_read_cache(const struct vcpu *, paddr_t gpa, - void *buffer, unsigned int size); -void hvmemul_write_cache(const struct vcpu *, paddr_t gpa, - const void *buffer, unsigned int size); -unsigned int hvmemul_cache_disable(struct vcpu *); -void hvmemul_cache_restore(struct vcpu *, unsigned int token); -/* For use in ASSERT()s only: */ -static inline bool hvmemul_cache_disabled(struct vcpu *v) -{ - return hvmemul_cache_disable(v) == hvmemul_cache_disable(v); -} -#else -static inline bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa, - void *buf, - unsigned int size) { return false; } -static inline void hvmemul_write_cache(const struct vcpu *v, paddr_t gpa, - const void *buf, unsigned int size) {} -#endif - -void hvm_dump_emulation_state(const char *loglvl, const char *prefix, - struct hvm_emulate_ctxt *hvmemul_ctxt, int rc); - -#endif /* __ASM_X86_HVM_EMULATE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/grant_table.h b/xen/include/asm-x86/hvm/grant_table.h deleted file mode 100644 index a5612585b3..0000000000 --- a/xen/include/asm-x86/hvm/grant_table.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * asm-x86/hvm/grant_table.h - * - * Grant table interfaces for HVM guests - * - * Copyright (C) 2017 Wei Liu - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . 
- */ - -#ifndef __X86_HVM_GRANT_TABLE_H__ -#define __X86_HVM_GRANT_TABLE_H__ - -#ifdef CONFIG_HVM - -int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, - unsigned int cache_flags); -int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags); - -#else - -#include - -static inline int create_grant_p2m_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, - unsigned int cache_flags) -{ - return GNTST_general_error; -} - -static inline int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags) -{ - return GNTST_general_error; -} - -#endif - -#endif /* __X86_HVM_GRANT_TABLE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/guest_access.h b/xen/include/asm-x86/hvm/guest_access.h deleted file mode 100644 index edacba75db..0000000000 --- a/xen/include/asm-x86/hvm/guest_access.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_X86_HVM_GUEST_ACCESS_H__ -#define __ASM_X86_HVM_GUEST_ACCESS_H__ - -unsigned int copy_to_user_hvm(void *to, const void *from, unsigned int len); -unsigned int clear_user_hvm(void *to, unsigned int len); -unsigned int copy_from_user_hvm(void *to, const void *from, unsigned int len); - -#endif /* __ASM_X86_HVM_GUEST_ACCESS_H__ */ diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h deleted file mode 100644 index bd2cbb0e7b..0000000000 --- a/xen/include/asm-x86/hvm/hvm.h +++ /dev/null @@ -1,886 +0,0 @@ -/* - * hvm.h: Hardware virtual machine assist interface definitions. - * - * Leendert van Doorn, leendert@watson.ibm.com - * Copyright (c) 2005, International Business Machines Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_HVM_H__ -#define __ASM_X86_HVM_HVM_H__ - -#include -#include -#include -#include -#include - -#ifdef CONFIG_HVM_FEP -/* Permit use of the Forced Emulation Prefix in HVM guests */ -extern bool_t opt_hvm_fep; -#else -#define opt_hvm_fep 0 -#endif - -/* Interrupt acknowledgement sources. 
*/ -enum hvm_intsrc { - hvm_intsrc_none, - hvm_intsrc_pic, - hvm_intsrc_lapic, - hvm_intsrc_nmi, - hvm_intsrc_mce, - hvm_intsrc_vector -}; -struct hvm_intack { - uint8_t source; /* enum hvm_intsrc */ - uint8_t vector; -}; -#define hvm_intack(src, vec) ((struct hvm_intack) { hvm_intsrc_##src, vec }) -#define hvm_intack_none hvm_intack(none, 0) -#define hvm_intack_pic(vec) hvm_intack(pic, vec) -#define hvm_intack_lapic(vec) hvm_intack(lapic, vec) -#define hvm_intack_nmi hvm_intack(nmi, 2) -#define hvm_intack_mce hvm_intack(mce, 18) -#define hvm_intack_vector(vec) hvm_intack(vector, vec) -enum hvm_intblk { - hvm_intblk_none, /* not blocked (deliverable) */ - hvm_intblk_shadow, /* MOV-SS or STI shadow */ - hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */ - hvm_intblk_tpr, /* LAPIC TPR too high */ - hvm_intblk_nmi_iret, /* NMI blocked until IRET */ - hvm_intblk_arch, /* SVM/VMX specific reason */ -}; - -/* These happen to be the same as the VMX interrupt shadow definitions. */ -#define HVM_INTR_SHADOW_STI 0x00000001 -#define HVM_INTR_SHADOW_MOV_SS 0x00000002 -#define HVM_INTR_SHADOW_SMI 0x00000004 -#define HVM_INTR_SHADOW_NMI 0x00000008 - -/* - * HAP super page capabilities: - * bit0: if 2MB super page is allowed? - * bit1: if 1GB super page is allowed? - */ -#define HVM_HAP_SUPERPAGE_2MB 0x00000001 -#define HVM_HAP_SUPERPAGE_1GB 0x00000002 - -#define HVM_EVENT_VECTOR_UNSET (-1) -#define HVM_EVENT_VECTOR_UPDATING (-2) - -/* update_guest_cr() flags. */ -#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001 - -/* - * The hardware virtual machine (HVM) interface abstracts away from the - * x86/x86_64 CPU virtualization assist specifics. Currently this interface - * supports Intel's VT-x and AMD's SVM extensions. - */ -struct hvm_function_table { - char *name; - - /* Support Hardware-Assisted Paging? */ - bool_t hap_supported; - - /* Necessary hardware support for alternate p2m's? */ - bool altp2m_supported; - - /* Hardware virtual interrupt delivery enable? */ - bool virtual_intr_delivery_enabled; - - /* Indicate HAP capabilities. */ - unsigned int hap_capabilities; - - /* - * Initialise/destroy HVM domain/vcpu resources - */ - int (*domain_initialise)(struct domain *d); - void (*domain_creation_finished)(struct domain *d); - void (*domain_relinquish_resources)(struct domain *d); - void (*domain_destroy)(struct domain *d); - int (*vcpu_initialise)(struct vcpu *v); - void (*vcpu_destroy)(struct vcpu *v); - - /* save and load hvm guest cpu context for save/restore */ - void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); - int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); - - /* Examine specifics of the guest state. */ - unsigned int (*get_interrupt_shadow)(struct vcpu *v); - void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow); - int (*guest_x86_mode)(struct vcpu *v); - unsigned int (*get_cpl)(struct vcpu *v); - void (*get_segment_register)(struct vcpu *v, enum x86_segment seg, - struct segment_register *reg); - void (*set_segment_register)(struct vcpu *v, enum x86_segment seg, - struct segment_register *reg); - unsigned long (*get_shadow_gs_base)(struct vcpu *v); - - /* - * Re-set the value of CR3 that Xen runs on when handling VM exits. - */ - void (*update_host_cr3)(struct vcpu *v); - - /* - * Called to inform HVM layer that a guest CRn or EFER has changed. 
- */ - void (*update_guest_cr)(struct vcpu *v, unsigned int cr, - unsigned int flags); - void (*update_guest_efer)(struct vcpu *v); - - void (*cpuid_policy_changed)(struct vcpu *v); - - void (*fpu_leave)(struct vcpu *v); - - int (*get_guest_pat)(struct vcpu *v, u64 *); - int (*set_guest_pat)(struct vcpu *v, u64); - - bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *); - bool (*set_guest_bndcfgs)(struct vcpu *v, u64); - - void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc); - - void (*inject_event)(const struct x86_event *event); - - void (*init_hypercall_page)(void *ptr); - - bool (*event_pending)(const struct vcpu *v); - bool (*get_pending_event)(struct vcpu *v, struct x86_event *info); - void (*invlpg)(struct vcpu *v, unsigned long linear); - - int (*cpu_up_prepare)(unsigned int cpu); - void (*cpu_dead)(unsigned int cpu); - - int (*cpu_up)(void); - void (*cpu_down)(void); - - /* Copy up to 15 bytes from cached instruction bytes at current rIP. */ - unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf); - - /* Instruction intercepts: non-void return values are X86EMUL codes. */ - void (*wbinvd_intercept)(void); - void (*fpu_dirty_intercept)(void); - int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content); - int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content); - void (*handle_cd)(struct vcpu *v, unsigned long value); - void (*set_info_guest)(struct vcpu *v); - void (*set_rdtsc_exiting)(struct vcpu *v, bool_t); - void (*set_descriptor_access_exiting)(struct vcpu *v, bool); - - /* Nested HVM */ - int (*nhvm_vcpu_initialise)(struct vcpu *v); - void (*nhvm_vcpu_destroy)(struct vcpu *v); - int (*nhvm_vcpu_reset)(struct vcpu *v); - int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event); - uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v); - bool_t (*nhvm_vmcx_guest_intercepts_event)( - struct vcpu *v, unsigned int vector, int errcode); - - bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v); - - enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v); - void (*nhvm_domain_relinquish_resources)(struct domain *d); - - /* Virtual interrupt delivery */ - void (*update_eoi_exit_bitmap)(struct vcpu *v, uint8_t vector, bool set); - void (*process_isr)(int isr, struct vcpu *v); - void (*deliver_posted_intr)(struct vcpu *v, u8 vector); - void (*sync_pir_to_irr)(struct vcpu *v); - bool (*test_pir)(const struct vcpu *v, uint8_t vector); - void (*handle_eoi)(uint8_t vector, int isr); - - /* Walk nested p2m */ - int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa, - paddr_t *L1_gpa, unsigned int *page_order, - uint8_t *p2m_acc, bool_t access_r, - bool_t access_w, bool_t access_x); - - void (*enable_msr_interception)(struct domain *d, uint32_t msr); - bool_t (*is_singlestep_supported)(void); - - /* Alternate p2m */ - void (*altp2m_vcpu_update_p2m)(struct vcpu *v); - void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v); - bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v); - int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs); - - /* vmtrace */ - int (*vmtrace_control)(struct vcpu *v, bool enable, bool reset); - int (*vmtrace_output_position)(struct vcpu *v, uint64_t *pos); - int (*vmtrace_set_option)(struct vcpu *v, uint64_t key, uint64_t value); - int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value); - int (*vmtrace_reset)(struct vcpu *v); - - /* - * Parameters and callbacks for hardware-assisted TSC scaling, - * which are valid only when the hardware feature is available.
- */ - struct { - /* number of bits of the fractional part of TSC scaling ratio */ - uint8_t ratio_frac_bits; - /* maximum-allowed TSC scaling ratio */ - uint64_t max_ratio; - - /* Architecture function to setup TSC scaling ratio */ - void (*setup)(struct vcpu *v); - } tsc_scaling; -}; - -extern struct hvm_function_table hvm_funcs; -extern bool_t hvm_enabled; -extern s8 hvm_port80_allowed; - -extern const struct hvm_function_table *start_svm(void); -extern const struct hvm_function_table *start_vmx(void); - -int hvm_domain_initialise(struct domain *d); -void hvm_domain_relinquish_resources(struct domain *d); -void hvm_domain_destroy(struct domain *d); - -int hvm_vcpu_initialise(struct vcpu *v); -void hvm_vcpu_destroy(struct vcpu *v); -void hvm_vcpu_down(struct vcpu *v); -int hvm_vcpu_cacheattr_init(struct vcpu *v); -void hvm_vcpu_cacheattr_destroy(struct vcpu *v); -void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip); - -void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat); -int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat); - -u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc); - -u64 hvm_scale_tsc(const struct domain *d, u64 tsc); -u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz); - -void hvm_init_guest_time(struct domain *d); -void hvm_set_guest_time(struct vcpu *v, u64 guest_time); -uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc); - -int vmsi_deliver( - struct domain *d, int vector, - uint8_t dest, uint8_t dest_mode, - uint8_t delivery_mode, uint8_t trig_mode); -struct hvm_pirq_dpci; -void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *); -int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode); - -enum hvm_intblk -hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack); - -void hvm_init_hypercall_page(struct domain *d, void *ptr); - -void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg, - struct segment_register *reg); -void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg, - struct segment_register *reg); - -void hvm_set_info_guest(struct vcpu *v); - -bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val); - -int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len); -void hvm_migrate_timers(struct vcpu *v); -void hvm_do_resume(struct vcpu *v); -void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v); -void hvm_migrate_pirqs(struct vcpu *v); - -void hvm_inject_event(const struct x86_event *event); - -int hvm_event_needs_reinjection(uint8_t type, uint8_t vector); - -uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2); - -void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable); - -enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int }; -void hvm_task_switch( - uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason, - int32_t errcode, unsigned int insn_len, unsigned int extra_eflags); - -enum hvm_access_type { - hvm_access_insn_fetch, - hvm_access_none, - hvm_access_read, - hvm_access_write -}; - -bool hvm_vcpu_virtual_to_linear( - struct vcpu *v, - enum x86_segment seg, - const struct segment_register *reg, - unsigned long offset, - unsigned int bytes, - enum hvm_access_type access_type, - const struct segment_register *active_cs, - unsigned long *linear_addr); - -static inline bool hvm_virtual_to_linear_addr( - enum x86_segment seg, - const struct segment_register *reg, - unsigned long offset, - unsigned int bytes, - enum hvm_access_type access_type, - const struct segment_register *active_cs, - 
unsigned long *linear) -{ - return hvm_vcpu_virtual_to_linear(current, seg, reg, offset, bytes, - access_type, active_cs, linear); -} - -void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent, - bool_t *writable); -void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent); -void hvm_unmap_guest_frame(void *p, bool_t permanent); -void hvm_mapped_guest_frames_mark_dirty(struct domain *); - -int hvm_debug_op(struct vcpu *v, int32_t op); - -/* Caller should pause vcpu before calling this function */ -void hvm_toggle_singlestep(struct vcpu *v); -void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx); - -struct npfec; -int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, - struct npfec npfec); - -/* Check CR4/EFER values */ -const char *hvm_efer_valid(const struct vcpu *v, uint64_t value, - signed int cr0_pg); -unsigned long hvm_cr4_guest_valid_bits(const struct domain *d); - -int hvm_copy_context_and_params(struct domain *src, struct domain *dst); - -int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value); - -#ifdef CONFIG_HVM - -#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0) - -#define hvm_tsc_scaling_supported \ - (!!hvm_funcs.tsc_scaling.ratio_frac_bits) - -#define hvm_default_tsc_scaling_ratio \ - (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits) - -#define hvm_tsc_scaling_ratio(d) \ - ((d)->arch.hvm.tsc_scaling_ratio) - -#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0) - -#define hvm_paging_enabled(v) \ - (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG)) -#define hvm_wp_enabled(v) \ - (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP)) -#define hvm_pcid_enabled(v) \ - (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE)) -#define hvm_pae_enabled(v) \ - (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE)) -#define hvm_smep_enabled(v) \ - (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP)) -#define hvm_smap_enabled(v) \ - (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP)) -#define hvm_nx_enabled(v) \ - ((v)->arch.hvm.guest_efer & EFER_NXE) -#define hvm_pku_enabled(v) \ - (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE)) - -/* Can we use superpages in the HAP p2m table? */ -#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB)) -#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB)) - -#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA)) - -static inline bool hvm_has_set_descriptor_access_exiting(void) -{ - return hvm_funcs.set_descriptor_access_exiting; -} - -static inline void hvm_domain_creation_finished(struct domain *d) -{ - if ( hvm_funcs.domain_creation_finished ) - alternative_vcall(hvm_funcs.domain_creation_finished, d); -} - -static inline int -hvm_guest_x86_mode(struct vcpu *v) -{ - ASSERT(v == current); - return alternative_call(hvm_funcs.guest_x86_mode, v); -} - -static inline void -hvm_update_host_cr3(struct vcpu *v) -{ - if ( hvm_funcs.update_host_cr3 ) - alternative_vcall(hvm_funcs.update_host_cr3, v); -} - -static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr) -{ - alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0); -} - -static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush) -{ - unsigned int flags = noflush ? 
HVM_UPDATE_GUEST_CR3_NOFLUSH : 0; - - alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags); -} - -static inline void hvm_update_guest_efer(struct vcpu *v) -{ - alternative_vcall(hvm_funcs.update_guest_efer, v); -} - -static inline void hvm_cpuid_policy_changed(struct vcpu *v) -{ - alternative_vcall(hvm_funcs.cpuid_policy_changed, v); -} - -static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, - uint64_t at_tsc) -{ - alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc); -} - -/* - * Called to ensure that all guest-specific mappings in a tagged TLB are - * flushed; does *not* flush Xen's TLB entries, and on processors without a - * tagged TLB it will be a noop. - */ -static inline void hvm_flush_guest_tlbs(void) -{ - if ( hvm_enabled ) - hvm_asid_flush_core(); -} - -static inline unsigned int -hvm_get_cpl(struct vcpu *v) -{ - return alternative_call(hvm_funcs.get_cpl, v); -} - -static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v) -{ - return alternative_call(hvm_funcs.get_shadow_gs_base, v); -} - -static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val) -{ - return hvm_funcs.get_guest_bndcfgs && - alternative_call(hvm_funcs.get_guest_bndcfgs, v, val); -} - -#define has_hvm_params(d) \ - ((d)->arch.hvm.params != NULL) - -#define viridian_feature_mask(d) \ - (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0) - -#define is_viridian_domain(d) \ - (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq)) - -#define is_viridian_vcpu(v) \ - is_viridian_domain((v)->domain) - -#define has_viridian_time_ref_count(d) \ - (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count)) - -#define has_viridian_apic_assist(d) \ - (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist)) - -#define has_viridian_synic(d) \ - (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic)) - -static inline void hvm_inject_exception( - unsigned int vector, unsigned int type, - unsigned int insn_len, int error_code) -{ - struct x86_event event = { - .vector = vector, - .type = type, - .insn_len = insn_len, - .error_code = error_code, - }; - - hvm_inject_event(&event); -} - -static inline void hvm_inject_hw_exception(unsigned int vector, int errcode) -{ - struct x86_event event = { - .vector = vector, - .type = X86_EVENTTYPE_HW_EXCEPTION, - .error_code = errcode, - }; - - hvm_inject_event(&event); -} - -static inline void hvm_inject_page_fault(int errcode, unsigned long cr2) -{ - struct x86_event event = { - .vector = TRAP_page_fault, - .type = X86_EVENTTYPE_HW_EXCEPTION, - .error_code = errcode, - .cr2 = cr2, - }; - - hvm_inject_event(&event); -} -
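The injection wrappers above reduce common cases to one-liners. A brief sketch (TRAP_gp_fault and PFEC_write_access are the vector and error-code constants used elsewhere in Xen; the scenarios are illustrative):

    /* Sketch: inject #GP(0), e.g. on a privilege violation. */
    static void example_inject_gp(void)
    {
        hvm_inject_hw_exception(TRAP_gp_fault, 0);
    }

    /* Sketch: inject a write #PF against linear address gla. */
    static void example_inject_pf(unsigned long gla)
    {
        hvm_inject_page_fault(PFEC_write_access, gla);
    }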
-static inline bool hvm_event_pending(const struct vcpu *v) -{ - return alternative_call(hvm_funcs.event_pending, v); -} - -static inline void hvm_invlpg(struct vcpu *v, unsigned long linear) -{ - alternative_vcall(hvm_funcs.invlpg, v, linear); -} - -/* These bits in CR4 are owned by the host. */ -#define HVM_CR4_HOST_MASK (mmu_cr4_features & \ - (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE)) - -/* These exceptions must always be intercepted. */ -#define HVM_TRAP_MASK ((1U << TRAP_debug) | \ - (1U << TRAP_alignment_check) | \ - (1U << TRAP_machine_check)) - -static inline int hvm_cpu_up(void) -{ - return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0); -} - -static inline void hvm_cpu_down(void) -{ - if ( hvm_funcs.cpu_down ) - hvm_funcs.cpu_down(); -} - -static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf) -{ - return (hvm_funcs.get_insn_bytes - ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0); -} - -static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs) -{ -#ifndef NDEBUG - regs->error_code = 0xbeef; - regs->entry_vector = 0xbeef; - regs->saved_upcall_mask = 0xbf; - regs->cs = 0xbeef; - regs->ss = 0xbeef; - regs->ds = 0xbeef; - regs->es = 0xbeef; - regs->fs = 0xbeef; - regs->gs = 0xbeef; -#endif -} - -/* - * Nested HVM - */ - -/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to - * the given exception event. - */ -static inline int nhvm_vcpu_vmexit_event( - struct vcpu *v, const struct x86_event *event) -{ - return hvm_funcs.nhvm_vcpu_vmexit_event(v, event); -} - -/* returns l1 guest's cr3 that points to the page table used to - * translate l2 guest physical addresses to l1 guest physical addresses. - */ -static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v) -{ - return hvm_funcs.nhvm_vcpu_p2m_base(v); -} - -/* returns true when the l1 guest intercepts the specified trap */ -static inline bool_t nhvm_vmcx_guest_intercepts_event( - struct vcpu *v, unsigned int vector, int errcode) -{ - return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode); -} - -/* returns true when l1 guest wants to use hap to run l2 guest */ -static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v) -{ - return hvm_funcs.nhvm_vmcx_hap_enabled(v); -} - -/* interrupt */ -static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v) -{ - return hvm_funcs.nhvm_intr_blocked(v); -} - -static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr) -{ - if ( hvm_funcs.enable_msr_interception ) - { - hvm_funcs.enable_msr_interception(d, msr); - return 1; - } - - return 0; -} - -static inline bool_t hvm_is_singlestep_supported(void) -{ - return (hvm_funcs.is_singlestep_supported && - hvm_funcs.is_singlestep_supported()); -} - -static inline bool hvm_hap_supported(void) -{ - return hvm_funcs.hap_supported; -} - -/* returns true if hardware supports alternate p2m's */ -static inline bool hvm_altp2m_supported(void) -{ - return hvm_funcs.altp2m_supported; -} - -/* updates the current hardware p2m */ -static inline void altp2m_vcpu_update_p2m(struct vcpu *v) -{ - if ( hvm_funcs.altp2m_vcpu_update_p2m ) - hvm_funcs.altp2m_vcpu_update_p2m(v); -} - -/* updates VMCS fields related to VMFUNC and #VE */ -static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v) -{ - if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve ) - hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v); -} - -/* emulates #VE */ -static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v) -{ - if ( hvm_funcs.altp2m_vcpu_emulate_ve ) - { - hvm_funcs.altp2m_vcpu_emulate_ve(v); - return true; - } - return false; -} - -static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset) -{ - if ( hvm_funcs.vmtrace_control ) - return hvm_funcs.vmtrace_control(v, enable, reset); - - return -EOPNOTSUPP; -} - -/* Returns -errno, or a boolean of whether tracing is currently active.
*/ -static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos) -{ - if ( hvm_funcs.vmtrace_output_position ) - return hvm_funcs.vmtrace_output_position(v, pos); - - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_set_option( - struct vcpu *v, uint64_t key, uint64_t value) -{ - if ( hvm_funcs.vmtrace_set_option ) - return hvm_funcs.vmtrace_set_option(v, key, value); - - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_get_option( - struct vcpu *v, uint64_t key, uint64_t *value) -{ - if ( hvm_funcs.vmtrace_get_option ) - return hvm_funcs.vmtrace_get_option(v, key, value); - - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_reset(struct vcpu *v) -{ - if ( hvm_funcs.vmtrace_reset ) - return hvm_funcs.vmtrace_reset(v); - - return -EOPNOTSUPP; -} - -/* - * This must be defined as a macro instead of an inline function, - * because it uses 'struct vcpu' and 'struct domain' which have - * not been defined yet. - */ -#define arch_vcpu_block(v) ({ \ - struct vcpu *v_ = (v); \ - struct domain *d_ = v_->domain; \ - if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block ) \ - d_->arch.hvm.pi_ops.vcpu_block(v_); \ -}) - -#else /* CONFIG_HVM */ - -#define hvm_enabled false - -/* - * List of inline functions above, of which only declarations are - * needed because DCE will kick in. - */ -int hvm_guest_x86_mode(struct vcpu *v); -unsigned long hvm_get_shadow_gs_base(struct vcpu *v); -void hvm_cpuid_policy_changed(struct vcpu *v); -void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc); -bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val); - -/* End of prototype list */ - -/* Called by code in other header */ -static inline bool hvm_is_singlestep_supported(void) -{ - return false; -} - -static inline bool hvm_hap_supported(void) -{ - return false; -} - -static inline bool nhvm_vmcx_hap_enabled(const struct vcpu *v) -{ - ASSERT_UNREACHABLE(); - return false; -} - - -/* Called by common code */ -static inline int hvm_cpu_up(void) -{ - return 0; -} - -static inline void hvm_cpu_down(void) {} - -static inline void hvm_flush_guest_tlbs(void) {} - -static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear) -{ - ASSERT_UNREACHABLE(); -} - -static inline void hvm_domain_creation_finished(struct domain *d) -{ - ASSERT_UNREACHABLE(); -} - -/* - * Shadow code needs further cleanup to eliminate some HVM-only paths. For - * now provide the stubs here but assert they will never be reached. 
- */ -static inline void hvm_update_host_cr3(const struct vcpu *v) -{ - ASSERT_UNREACHABLE(); -} - -static inline void hvm_update_guest_cr3(const struct vcpu *v, bool noflush) -{ - ASSERT_UNREACHABLE(); -} - -static inline unsigned int hvm_get_cpl(const struct vcpu *v) -{ - ASSERT_UNREACHABLE(); - return -1; -} - -static inline bool hvm_event_pending(const struct vcpu *v) -{ - return false; -} - -static inline void hvm_inject_hw_exception(unsigned int vector, int errcode) -{ - ASSERT_UNREACHABLE(); -} - -static inline bool hvm_has_set_descriptor_access_exiting(void) -{ - return false; -} - -static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset) -{ - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos) -{ - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_set_option( - struct vcpu *v, uint64_t key, uint64_t value) -{ - return -EOPNOTSUPP; -} - -static inline int hvm_vmtrace_get_option( - struct vcpu *v, uint64_t key, uint64_t *value) -{ - return -EOPNOTSUPP; -} - -#define is_viridian_domain(d) ((void)(d), false) -#define is_viridian_vcpu(v) ((void)(v), false) -#define has_viridian_time_ref_count(d) ((void)(d), false) -#define hvm_long_mode_active(v) ((void)(v), false) -#define hvm_get_guest_time(v) ((void)(v), 0) - -#define hvm_tsc_scaling_supported false -#define hap_has_1gb false -#define hap_has_2mb false - -#define hvm_paging_enabled(v) ((void)(v), false) -#define hvm_wp_enabled(v) ((void)(v), false) -#define hvm_pcid_enabled(v) ((void)(v), false) -#define hvm_pae_enabled(v) ((void)(v), false) -#define hvm_smep_enabled(v) ((void)(v), false) -#define hvm_smap_enabled(v) ((void)(v), false) -#define hvm_nx_enabled(v) ((void)(v), false) -#define hvm_pku_enabled(v) ((void)(v), false) - -#define arch_vcpu_block(v) ((void)(v)) - -#endif /* CONFIG_HVM */ - -#endif /* __ASM_X86_HVM_HVM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h deleted file mode 100644 index 54e0161b49..0000000000 --- a/xen/include/asm-x86/hvm/io.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * io.h: HVM IO support - * - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef __ASM_X86_HVM_IO_H__ -#define __ASM_X86_HVM_IO_H__ - -#include -#include - -#define NR_IO_HANDLERS 32 - -typedef int (*hvm_mmio_read_t)(struct vcpu *v, - unsigned long addr, - unsigned int length, - unsigned long *val); -typedef int (*hvm_mmio_write_t)(struct vcpu *v, - unsigned long addr, - unsigned int length, - unsigned long val); -typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr); - -struct hvm_mmio_ops { - hvm_mmio_check_t check; - hvm_mmio_read_t read; - hvm_mmio_write_t write; -}; - -typedef int (*portio_action_t)( - int dir, unsigned int port, unsigned int bytes, uint32_t *val); - -struct hvm_io_handler { - union { - struct { - const struct hvm_mmio_ops *ops; - } mmio; - struct { - unsigned int port, size; - portio_action_t action; - } portio; - }; - const struct hvm_io_ops *ops; - uint8_t type; -}; - -typedef int (*hvm_io_read_t)(const struct hvm_io_handler *, - uint64_t addr, - uint32_t size, - uint64_t *data); -typedef int (*hvm_io_write_t)(const struct hvm_io_handler *, - uint64_t addr, - uint32_t size, - uint64_t data); -typedef bool_t (*hvm_io_accept_t)(const struct hvm_io_handler *, - const ioreq_t *p); -typedef void (*hvm_io_complete_t)(const struct hvm_io_handler *); - -struct hvm_io_ops { - hvm_io_accept_t accept; - hvm_io_read_t read; - hvm_io_write_t write; - hvm_io_complete_t complete; -}; - -int hvm_process_io_intercept(const struct hvm_io_handler *handler, - ioreq_t *p); - -int hvm_io_intercept(ioreq_t *p); - -struct hvm_io_handler *hvm_next_io_handler(struct domain *d); - -bool_t hvm_mmio_internal(paddr_t gpa); - -void register_mmio_handler(struct domain *d, - const struct hvm_mmio_ops *ops); - -void register_portio_handler( - struct domain *d, unsigned int port, unsigned int size, - portio_action_t action); - -bool relocate_portio_handler( - struct domain *d, unsigned int old_port, unsigned int new_port, - unsigned int size); - -void send_timeoffset_req(unsigned long timeoff); -bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn, - struct npfec); -bool handle_pio(uint16_t port, unsigned int size, int dir); -void hvm_interrupt_post(struct vcpu *v, int vector, int type); -void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq); -void msix_write_completion(struct vcpu *); - -#ifdef CONFIG_HVM -void msixtbl_init(struct domain *d); -#else -static inline void msixtbl_init(struct domain *d) {} -#endif - -/* Arch-specific MSI data for vPCI. */ -struct vpci_arch_msi { - int pirq; - bool bound; -}; - -/* Arch-specific MSI-X entry data for vPCI. */ -struct vpci_arch_msix_entry { - int pirq; -}; - -enum stdvga_cache_state { - STDVGA_CACHE_UNINITIALIZED, - STDVGA_CACHE_ENABLED, - STDVGA_CACHE_DISABLED -}; - -struct hvm_hw_stdvga { - uint8_t sr_index; - uint8_t sr[8]; - uint8_t gr_index; - uint8_t gr[9]; - bool_t stdvga; - enum stdvga_cache_state cache; - uint32_t latch; - struct page_info *vram_page[64]; /* shadow of 0xa0000-0xaffff */ - spinlock_t lock; -}; - -void stdvga_init(struct domain *d); -void stdvga_deinit(struct domain *d); - -extern void hvm_dpci_msi_eoi(struct domain *d, int vector); - -/* Decode a PCI port IO access into a bus/slot/func/reg. */ -unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr, - pci_sbdf_t *sbdf); - -/* - * HVM port IO handler that performs forwarding of guest IO ports into machine - * IO ports. - */ -void register_g2m_portio_handler(struct domain *d); - -/* HVM port IO handler for vPCI accesses. 
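Before the vPCI declarations resume, a quick illustration of the portio plumbing declared above; the handler and port number below are made up, only the signatures come from this header:

    /* Hypothetical handler: reads return 0xffff, writes are ignored. */
    static int debug_port_io(int dir, unsigned int port, unsigned int bytes,
                             uint32_t *val)
    {
        if ( dir == IOREQ_READ )
            *val = 0xffff;

        return X86EMUL_OKAY;
    }

    /* During domain construction: claim 2 bytes at port 0x510 (made up). */
    register_portio_handler(d, 0x510, 2, debug_port_io);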
*/ -void register_vpci_portio_handler(struct domain *d); - -/* HVM MMIO handler for PCI MMCFG accesses. */ -int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr, - unsigned int start_bus, unsigned int end_bus, - unsigned int seg); -/* Destroy tracked MMCFG areas. */ -void destroy_vpci_mmcfg(struct domain *d); - -/* Check whether an address lies within an MMCFG region of a domain. */ -bool vpci_is_mmcfg_address(const struct domain *d, paddr_t addr); - -#endif /* __ASM_X86_HVM_IO_H__ */ - - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h deleted file mode 100644 index 9b2eb6fedf..0000000000 --- a/xen/include/asm-x86/hvm/ioreq.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * ioreq.h: Hardware virtual machine assist interface definitions. - * - * Copyright (c) 2016 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_IOREQ_H__ -#define __ASM_X86_HVM_IOREQ_H__ - -/* This correlation must not be altered */ -#define IOREQ_STATUS_HANDLED X86EMUL_OKAY -#define IOREQ_STATUS_UNHANDLED X86EMUL_UNHANDLEABLE -#define IOREQ_STATUS_RETRY X86EMUL_RETRY - -#endif /* __ASM_X86_HVM_IOREQ_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h deleted file mode 100644 index c4369ceb7a..0000000000 --- a/xen/include/asm-x86/hvm/irq.h +++ /dev/null @@ -1,227 +0,0 @@ -/****************************************************************************** - * irq.h - * - * Interrupt distribution and delivery logic. - * - * Copyright (c) 2006, K A Fraser, XenSource Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_IRQ_H__ -#define __ASM_X86_HVM_IRQ_H__ - -#include - -#include -#include -#include - -struct hvm_irq { - /* - * Virtual interrupt wires for a single PCI bus. - * Indexed by: device*4 + INTx#. - */ - struct hvm_hw_pci_irqs pci_intx; - - /* - * Virtual interrupt wires for ISA devices. - * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). - */ - struct hvm_hw_isa_irqs isa_irq; - - /* - * PCI-ISA interrupt router. - * Each PCI device/INTx pair is 'wire-ORed' into one of four links using - * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
- * The router provides a programmable mapping from each link to a GSI. - */ - struct hvm_hw_pci_link pci_link; - - /* Virtual interrupt and via-link for paravirtual platform driver. */ - uint32_t callback_via_asserted; - union { - enum { - HVMIRQ_callback_none, - HVMIRQ_callback_gsi, - HVMIRQ_callback_pci_intx, - HVMIRQ_callback_vector - } callback_via_type; - }; - union { - uint32_t gsi; - struct { uint8_t dev, intx; } pci; - uint32_t vector; - } callback_via; - - /* Number of INTx wires asserting each PCI-ISA link. */ - u8 pci_link_assert_count[4]; - - /* - * GSIs map onto PIC/IO-APIC in the usual way: - * 0-7: Master 8259 PIC, IO-APIC pins 0-7 - * 8-15: Slave 8259 PIC, IO-APIC pins 8-15 - * 16+ : IO-APIC pins 16+ - */ - - /* Last VCPU that was delivered a LowestPrio interrupt. */ - u8 round_robin_prev_vcpu; - - struct hvm_irq_dpci *dpci; - - /* - * Number of wires asserting each GSI. - * - * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space - * except ISA IRQ 0, which is connected to GSI 2. - * PCI links map into this space via the PCI-ISA bridge. - * - * GSIs 16+ are used only by PCI devices. The mapping from PCI device to - * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 - */ - unsigned int nr_gsis; - u8 gsi_assert_count[]; -}; - -#define hvm_pci_intx_gsi(dev, intx) \ - (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16) -#define hvm_pci_intx_link(dev, intx) \ - (((dev) + (intx)) & 3) -#define hvm_domain_irq(d) ((d)->arch.hvm.irq) - -#define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2) - -/* Check/Acknowledge next pending interrupt. */ -struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v); -struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v, - struct hvm_intack intack); - -struct dev_intx_gsi_link { - struct list_head list; - uint8_t bus; - uint8_t device; - uint8_t intx; -}; - -#define _HVM_IRQ_DPCI_MACH_PCI_SHIFT 0 -#define _HVM_IRQ_DPCI_MACH_MSI_SHIFT 1 -#define _HVM_IRQ_DPCI_MAPPED_SHIFT 2 -#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4 -#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5 -#define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT 6 -#define _HVM_IRQ_DPCI_NO_EOI_SHIFT 7 -#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15 -#define HVM_IRQ_DPCI_MACH_PCI (1u << _HVM_IRQ_DPCI_MACH_PCI_SHIFT) -#define HVM_IRQ_DPCI_MACH_MSI (1u << _HVM_IRQ_DPCI_MACH_MSI_SHIFT) -#define HVM_IRQ_DPCI_MAPPED (1u << _HVM_IRQ_DPCI_MAPPED_SHIFT) -#define HVM_IRQ_DPCI_GUEST_PCI (1u << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT) -#define HVM_IRQ_DPCI_GUEST_MSI (1u << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT) -#define HVM_IRQ_DPCI_IDENTITY_GSI (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT) -#define HVM_IRQ_DPCI_NO_EOI (1u << _HVM_IRQ_DPCI_NO_EOI_SHIFT) -#define HVM_IRQ_DPCI_TRANSLATE (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT) - -struct hvm_gmsi_info { - uint32_t gvec; - uint32_t gflags; - int dest_vcpu_id; /* -1: multi-dest; non-negative: destination vCPU id */ - bool posted; /* directly deliver to guest via VT-d PI? */ -}; - -struct hvm_girq_dpci_mapping { - struct list_head list; - uint8_t bus; - uint8_t device; - uint8_t intx; - uint8_t machine_gsi; -}; - -#define NR_ISAIRQS 16 -#define NR_LINK 4 -#define NR_HVM_DOMU_IRQS ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl) - -/* Protected by domain's event_lock */ -struct hvm_irq_dpci { - /* Guest IRQ to guest device/intx mapping. */ - struct list_head girq[NR_HVM_DOMU_IRQS]; - /* Record of mapped ISA IRQs */ - DECLARE_BITMAP(isairq_map, NR_ISAIRQS); - /* Record of mapped Links */ - uint8_t link_cnt[NR_LINK]; -}; - -/* Machine IRQ to guest device/intx mapping.
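A worked example of the routing macros just defined, as a sanity check (illustrative; dropped into any function, BUILD_BUG_ON verifies the arithmetic at build time): PCI device 3, INTB (INTx# 1) is wire-ORed onto link (3 + 1) & 3 = 0 and raises GSI ((3*4 + 3/8 + 1) & 31) + 16 = 29.

    BUILD_BUG_ON(hvm_pci_intx_link(3, 1) != 0);
    BUILD_BUG_ON(hvm_pci_intx_gsi(3, 1) != 29);
    BUILD_BUG_ON(hvm_isa_irq_to_gsi(0) != 2);  /* ISA IRQ 0 lives on GSI 2 */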
*/ -struct hvm_pirq_dpci { - uint32_t flags; - unsigned int state; - bool masked; - uint16_t pending; - struct list_head digl_list; - struct domain *dom; - struct hvm_gmsi_info gmsi; - struct list_head softirq_list; -}; - -void pt_pirq_init(struct domain *, struct hvm_pirq_dpci *); -bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *); -int pt_pirq_iterate(struct domain *d, - int (*cb)(struct domain *, - struct hvm_pirq_dpci *, void *arg), - void *arg); - -#ifdef CONFIG_HVM -bool pt_pirq_softirq_active(struct hvm_pirq_dpci *); -#else -static inline bool pt_pirq_softirq_active(struct hvm_pirq_dpci *dpci) -{ - return false; -} -#endif - -/* Modify state of a PCI INTx wire. */ -void hvm_pci_intx_assert(struct domain *d, unsigned int device, - unsigned int intx); -void hvm_pci_intx_deassert(struct domain *d, unsigned int device, - unsigned int intx); - -/* - * Modify state of an ISA device's IRQ wire. In some cases we are - * interested in the interrupt vector of the irq, but once the irq_lock - * is released the vector may be changed by others. The get_vector() - * callback allows us to fetch the interrupt vector under the protection - * of the irq_lock. In most cases, just set get_vector to NULL. - */ -int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq, - int (*get_vector)(const struct domain *d, - unsigned int gsi)); -void hvm_isa_irq_deassert(struct domain *d, unsigned int isa_irq); - -/* Modify state of GSIs. */ -void hvm_gsi_assert(struct domain *d, unsigned int gsi); -void hvm_gsi_deassert(struct domain *d, unsigned int gsi); - -int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq); - -int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data); - -/* Assert/deassert an IO APIC pin. */ -int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool level); -void hvm_ioapic_deassert(struct domain *d, unsigned int gsi); - -void hvm_maybe_deassert_evtchn_irq(void); -void hvm_assert_evtchn_irq(struct vcpu *v); -void hvm_set_callback_via(struct domain *d, uint64_t via); - -struct pirq; -bool hvm_domain_use_pirq(const struct domain *, const struct pirq *); - -#endif /* __ASM_X86_HVM_IRQ_H__ */ diff --git a/xen/include/asm-x86/hvm/monitor.h b/xen/include/asm-x86/hvm/monitor.h deleted file mode 100644 index a75cd8545c..0000000000 --- a/xen/include/asm-x86/hvm/monitor.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * include/asm-x86/hvm/monitor.h - * - * Arch-specific hardware virtual machine monitor abstractions. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_MONITOR_H__ -#define __ASM_X86_HVM_MONITOR_H__ - -#include - -enum hvm_monitor_debug_type -{ - HVM_MONITOR_SOFTWARE_BREAKPOINT, - HVM_MONITOR_SINGLESTEP_BREAKPOINT, - HVM_MONITOR_DEBUG_EXCEPTION, -}; - -/* - * Called for the current VCPU on crX/MSR changes by the guest. The boolean - * return value signals whether emulation should be postponed.
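Since the postponement contract is subtle, here is an illustrative fragment of how a CR-write path consumes hvm_monitor_crX() (declared just below). This mirrors, as far as I can tell, the shape of the real hvm_set_cr*() code, but treat it as a sketch:

    /* v == current, inside a CR0 write intercept: */
    if ( hvm_monitor_crX(CR0, value, old_value) )
    {
        /* Postponed: the write is replayed later if the monitor allows it. */
        v->arch.vm_event->write_data.do_write.cr0 = 1;
        v->arch.vm_event->write_data.cr0 = value;

        return X86EMUL_OKAY;
    }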
- */ -bool hvm_monitor_cr(unsigned int index, unsigned long value, - unsigned long old); -#define hvm_monitor_crX(cr, new, old) \ - hvm_monitor_cr(VM_EVENT_X86_##cr, new, old) -bool hvm_monitor_msr(unsigned int msr, uint64_t value, uint64_t old_value); -void hvm_monitor_descriptor_access(uint64_t exit_info, - uint64_t vmx_exit_qualification, - uint8_t descriptor, bool is_write); -int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type, - unsigned int trap_type, unsigned int insn_length, - unsigned int pending_dbg); -int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf, - unsigned int subleaf); -void hvm_monitor_interrupt(unsigned int vector, unsigned int type, - unsigned int err, uint64_t cr2); -bool hvm_monitor_emul_unimplemented(void); - -bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec, - uint16_t kind); - -#endif /* __ASM_X86_HVM_MONITOR_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h deleted file mode 100644 index d263925786..0000000000 --- a/xen/include/asm-x86/hvm/nestedhvm.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Nested HVM - * Copyright (c) 2011, Advanced Micro Devices, Inc. - * Author: Christoph Egger - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef _HVM_NESTEDHVM_H -#define _HVM_NESTEDHVM_H - -#include /* for uintNN_t */ -#include /* for struct vcpu, struct domain */ -#include /* for vcpu_nestedhvm */ -#include - -enum nestedhvm_vmexits { - NESTEDHVM_VMEXIT_ERROR = 0, /* inject VMEXIT w/ invalid VMCB */ - NESTEDHVM_VMEXIT_FATALERROR = 1, /* crash first level guest */ - NESTEDHVM_VMEXIT_HOST = 2, /* exit handled on host level */ - NESTEDHVM_VMEXIT_CONTINUE = 3, /* further handling */ - NESTEDHVM_VMEXIT_INJECT = 4, /* inject VMEXIT */ - NESTEDHVM_VMEXIT_DONE = 5, /* VMEXIT handled */ -}; - -/* Nested HVM on/off per domain */ -static inline bool nestedhvm_enabled(const struct domain *d) -{ - return IS_ENABLED(CONFIG_HVM) && (d->options & XEN_DOMCTL_CDF_nested_virt); -} - -/* Nested VCPU */ -int nestedhvm_vcpu_initialise(struct vcpu *v); -void nestedhvm_vcpu_destroy(struct vcpu *v); -void nestedhvm_vcpu_reset(struct vcpu *v); -bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v); -#define nestedhvm_vcpu_enter_guestmode(v) \ - vcpu_nestedhvm(v).nv_guestmode = 1 -#define nestedhvm_vcpu_exit_guestmode(v) \ - vcpu_nestedhvm(v).nv_guestmode = 0 - -/* Nested paging */ -#define NESTEDHVM_PAGEFAULT_DONE 0 -#define NESTEDHVM_PAGEFAULT_INJECT 1 -#define NESTEDHVM_PAGEFAULT_L1_ERROR 2 -#define NESTEDHVM_PAGEFAULT_L0_ERROR 3 -#define NESTEDHVM_PAGEFAULT_MMIO 4 -#define NESTEDHVM_PAGEFAULT_RETRY 5 -#define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6 -int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, - bool_t access_r, bool_t access_w, bool_t access_x); - -int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x); - -/* IO permission map */ -unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed); - -/* Misc */ -#define nestedhvm_paging_mode_hap(v) (!!nhvm_vmcx_hap_enabled(v)) -#define nestedhvm_vmswitch_in_progress(v) \ - (!!vcpu_nestedhvm((v)).nv_vmswitch_in_progress) - -void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m); - -static inline bool nestedhvm_is_n2(struct vcpu *v) -{ - if ( !nestedhvm_enabled(v->domain) || - nestedhvm_vmswitch_in_progress(v) || - !nestedhvm_paging_mode_hap(v) ) - return false; - - return nestedhvm_vcpu_in_guestmode(v); -} - -static inline void nestedhvm_set_cr(struct vcpu *v, unsigned int cr, - unsigned long value) -{ - if ( !nestedhvm_vmswitch_in_progress(v) && - nestedhvm_vcpu_in_guestmode(v) ) - v->arch.hvm.nvcpu.guest_cr[cr] = value; -} - -static inline bool vvmcx_valid(const struct vcpu *v) -{ - return vcpu_nestedhvm(v).nv_vvmcxaddr != INVALID_PADDR; -} - -#endif /* _HVM_NESTEDHVM_H */ diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h deleted file mode 100644 index 4efc535055..0000000000 --- a/xen/include/asm-x86/hvm/save.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * save.h: HVM support routines for save/restore - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef __XEN_HVM_SAVE_H__ -#define __XEN_HVM_SAVE_H__ - -#include -#include -#include -#include - -/* Marshalling and unmarshalling uses a buffer with size and cursor. */ -typedef struct hvm_domain_context { - uint32_t cur; - uint32_t size; - uint8_t *data; -} hvm_domain_context_t; - -/* Marshalling an entry: check space and fill in the header */ -int _hvm_init_entry(struct hvm_domain_context *h, - uint16_t tc, uint16_t inst, uint32_t len); - -/* Marshalling: copy the contents in a type-safe way */ -void _hvm_write_entry(struct hvm_domain_context *h, - void *src, uint32_t src_len); - -/* Marshalling: init and copy; evaluates to zero on success */ -#define hvm_save_entry(_x, _inst, _h, _src) ({ \ - int r; \ - r = _hvm_init_entry((_h), HVM_SAVE_CODE(_x), \ - (_inst), HVM_SAVE_LENGTH(_x)); \ - if ( r == 0 ) \ - _hvm_write_entry((_h), (_src), HVM_SAVE_LENGTH(_x)); \ - r; }) - -/* Unmarshalling: test an entry's size and typecode and record the instance */ -int _hvm_check_entry(struct hvm_domain_context *h, - uint16_t type, uint32_t len, bool_t strict_length); - -/* Unmarshalling: copy the contents in a type-safe way */ -void _hvm_read_entry(struct hvm_domain_context *h, - void *dest, uint32_t dest_len); - -/* - * Unmarshalling: check, then copy. Evaluates to zero on success. This load - * function requires the save entry to be the same size as the dest structure. - */ -#define _hvm_load_entry(_x, _h, _dst, _strict) ({ \ - int r; \ - struct hvm_save_descriptor *desc \ - = (struct hvm_save_descriptor *)&(_h)->data[(_h)->cur]; \ - if ( (r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x), \ - HVM_SAVE_LENGTH(_x), (_strict))) == 0 ) \ - { \ - _hvm_read_entry((_h), (_dst), HVM_SAVE_LENGTH(_x)); \ - if ( HVM_SAVE_HAS_COMPAT(_x) && \ - desc->length != HVM_SAVE_LENGTH(_x) ) \ - r = HVM_SAVE_FIX_COMPAT(_x, (_dst), desc->length); \ - } \ - else if (HVM_SAVE_HAS_COMPAT(_x) \ - && (r = _hvm_check_entry((_h), HVM_SAVE_CODE(_x), \ - HVM_SAVE_LENGTH_COMPAT(_x), (_strict))) == 0 ) { \ - _hvm_read_entry((_h), (_dst), HVM_SAVE_LENGTH_COMPAT(_x)); \ - r = HVM_SAVE_FIX_COMPAT(_x, (_dst), desc->length); \ - } \ - r; }) - -#define hvm_load_entry(_x, _h, _dst) \ - _hvm_load_entry(_x, _h, _dst, 1) -#define hvm_load_entry_zeroextend(_x, _h, _dst) \ - _hvm_load_entry(_x, _h, _dst, 0) - -/* Unmarshalling: what is the instance ID of the next entry? */ -static inline unsigned int hvm_load_instance(const struct hvm_domain_context *h) -{ - const struct hvm_save_descriptor *d = (const void *)&h->data[h->cur]; - - return d->instance; -} - -/* Handler types for different types of save-file entry. - * The save handler may save multiple instances of a type into the buffer; - * the load handler will be called once for each instance found when - * restoring. Both return non-zero on error. 
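To ground the marshalling helpers above, a hedged sketch of a save-side handler; the FOO record type and its field are hypothetical, while hvm_save_entry() is the macro defined earlier:

    /* Assumes HVM_SAVE_CODE(FOO)/HVM_SAVE_LENGTH(FOO) exist (hypothetical). */
    static int foo_save(struct vcpu *v, hvm_domain_context_t *h)
    {
        struct hvm_hw_foo ctxt = {
            .val = v->arch.hvm.foo_val,    /* hypothetical state */
        };

        /* Writes the descriptor plus the payload; evaluates to 0 on success. */
        return hvm_save_entry(FOO, v->vcpu_id, h, &ctxt);
    }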
*/ -typedef int (*hvm_save_handler) (struct vcpu *v, - hvm_domain_context_t *h); -typedef int (*hvm_load_handler) (struct domain *d, - hvm_domain_context_t *h); - -/* Init-time function to declare a pair of handlers for a type, - * and the maximum buffer space needed to save this type of state */ -void hvm_register_savevm(uint16_t typecode, - const char *name, - hvm_save_handler save_state, - hvm_load_handler load_state, - size_t size, int kind); - -/* The space needed for saving can be per-domain or per-vcpu: */ -#define HVMSR_PER_DOM 0 -#define HVMSR_PER_VCPU 1 - -/* Syntactic sugar around that function: specify the max number of - * saves, and this calculates the size of buffer needed */ -#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k) \ -static int __init __hvm_register_##_x##_save_and_restore(void) \ -{ \ - hvm_register_savevm(HVM_SAVE_CODE(_x), \ - #_x, \ - &_save, \ - &_load, \ - (_num) * (HVM_SAVE_LENGTH(_x) \ - + sizeof (struct hvm_save_descriptor)), \ - _k); \ - return 0; \ -} \ -__initcall(__hvm_register_##_x##_save_and_restore); - - -/* Entry points for saving and restoring HVM domain state */ -size_t hvm_save_size(struct domain *d); -int hvm_save(struct domain *d, hvm_domain_context_t *h); -int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, - XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz); -int hvm_load(struct domain *d, hvm_domain_context_t *h); - -/* Arch-specific definitions. */ -struct hvm_save_header; -void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr); -int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr); - -#endif /* __XEN_HVM_SAVE_H__ */ diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h deleted file mode 100644 index 6b583738ec..0000000000 --- a/xen/include/asm-x86/hvm/support.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * support.h: HVM support routines used by VT-x and SVM. - * - * Leendert van Doorn, leendert@watson.ibm.com - * Copyright (c) 2005, International Business Machines Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_SUPPORT_H__ -#define __ASM_X86_HVM_SUPPORT_H__ - -#include -#include -#include -#include -#include - -#ifndef NDEBUG -#define DBG_LEVEL_0 (1 << 0) -#define DBG_LEVEL_1 (1 << 1) -#define DBG_LEVEL_2 (1 << 2) -#define DBG_LEVEL_3 (1 << 3) -#define DBG_LEVEL_IO (1 << 4) -#define DBG_LEVEL_VMMU (1 << 5) -#define DBG_LEVEL_VLAPIC (1 << 6) -#define DBG_LEVEL_VLAPIC_TIMER (1 << 7) -#define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8) -#define DBG_LEVEL_IOAPIC (1 << 9) -#define DBG_LEVEL_HCALL (1 << 10) -#define DBG_LEVEL_MSR (1 << 11) - -extern unsigned int opt_hvm_debug_level; -#define HVM_DBG_LOG(level, _f, _a...) \ - do { \ - if ( unlikely((level) & opt_hvm_debug_level) ) \ - printk("[HVM:%d.%d] <%s> " _f "\n", \ - current->domain->domain_id, current->vcpu_id, __func__, \ - ## _a); \ - } while (0) -#else -#define HVM_DBG_LOG(level, _f, _a...) 
do {} while (0) -#endif - -extern unsigned long hvm_io_bitmap[]; - -enum hvm_translation_result { - HVMTRANS_okay, - HVMTRANS_bad_linear_to_gfn, - HVMTRANS_bad_gfn_to_mfn, - HVMTRANS_unhandleable, - HVMTRANS_gfn_paged_out, - HVMTRANS_gfn_shared, - HVMTRANS_need_retry, -}; - -/* - * Copy to/from a guest physical address. - * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical - * address range does not map entirely onto ordinary machine memory. - */ -enum hvm_translation_result hvm_copy_to_guest_phys( - paddr_t paddr, void *buf, unsigned int size, struct vcpu *v); -enum hvm_translation_result hvm_copy_from_guest_phys( - void *buf, paddr_t paddr, unsigned int size); - -/* - * Copy to/from a guest linear address. @pfec should include PFEC_user_mode - * if emulating a user-mode access (CPL=3). All other flags in @pfec are - * managed by the called function: it is therefore optional for the caller - * to set them. - * - * Returns: - * HVMTRANS_okay: Copy was entirely successful. - * HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to - * ordinary machine memory. - * HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a - * valid mapping to a guest physical address. - * The pagefault_info_t structure will be filled - * in if provided. - */ -typedef struct pagefault_info -{ - unsigned long linear; - int ec; -} pagefault_info_t; - -enum hvm_translation_result hvm_copy_to_guest_linear( - unsigned long addr, void *buf, unsigned int size, uint32_t pfec, - pagefault_info_t *pfinfo); -enum hvm_translation_result hvm_copy_from_guest_linear( - void *buf, unsigned long addr, unsigned int size, uint32_t pfec, - pagefault_info_t *pfinfo); -enum hvm_translation_result hvm_copy_from_vcpu_linear( - void *buf, unsigned long addr, unsigned int size, struct vcpu *v, - unsigned int pfec); - -/* - * Get a reference on the page under an HVM physical or linear address. If - * linear, a pagewalk is performed using pfec (fault details optionally in - * pfinfo). - * On success, returns HVMTRANS_okay with a reference taken on **_page. - */ -enum hvm_translation_result hvm_translate_get_page( - struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec, - pagefault_info_t *pfinfo, struct page_info **page_p, - gfn_t *gfn_p, p2m_type_t *p2mt_p); - -#define HVM_HCALL_completed 0 /* hypercall completed - no further action */ -#define HVM_HCALL_preempted 1 /* hypercall preempted - re-execute VMCALL */ -int hvm_hypercall(struct cpu_user_regs *regs); - -void hvm_hlt(unsigned int eflags); -void hvm_triple_fault(void); - -#define VM86_TSS_UPDATED (1ULL << 63) -void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit); - -void hvm_rdtsc_intercept(struct cpu_user_regs *regs); - -int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv); - -void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value); - -/* - * These functions all return X86EMUL return codes. For hvm_set_*(), the - * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is - * returned. 
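A caller-side illustration of the X86EMUL convention spelled out above; the helpers named here are declared just below and elsewhere in this tree, but the fragment itself is a sketch, not code from the patch:

    /* X86EMUL_EXCEPTION from hvm_set_*() means the caller raises #GP[0]. */
    if ( hvm_set_cr4(value, true) == X86EMUL_EXCEPTION )
        hvm_inject_hw_exception(TRAP_gp_fault, 0);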
- */ -int hvm_set_efer(uint64_t value); -int hvm_set_cr0(unsigned long value, bool may_defer); -int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer); -int hvm_set_cr4(unsigned long value, bool may_defer); -int hvm_descriptor_access_intercept(uint64_t exit_info, - uint64_t vmx_exit_qualification, - unsigned int descriptor, bool is_write); -int hvm_mov_to_cr(unsigned int cr, unsigned int gpr); -int hvm_mov_from_cr(unsigned int cr, unsigned int gpr); -void hvm_ud_intercept(struct cpu_user_regs *); - -/* - * May return X86EMUL_EXCEPTION, at which point the caller is responsible for - * injecting a #GP fault. Used to support speculative reads. - */ -int __must_check hvm_msr_read_intercept( - unsigned int msr, uint64_t *msr_content); -int __must_check hvm_msr_write_intercept( - unsigned int msr, uint64_t msr_content, bool may_defer); - -#endif /* __ASM_X86_HVM_SUPPORT_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h deleted file mode 100644 index 0e5ec3ab78..0000000000 --- a/xen/include/asm-x86/hvm/svm/asid.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * asid.h: handling ASIDs in SVM. - * Copyright (c) 2007, Advanced Micro Devices, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_SVM_ASID_H__ -#define __ASM_X86_HVM_SVM_ASID_H__ - -#include -#include -#include - -void svm_asid_init(const struct cpuinfo_x86 *c); -void svm_asid_handle_vmrun(void); - -static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear) -{ -#if 0 - /* Optimization? */ - svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid); -#endif - - /* Safe fallback. Take a new ASID. */ - hvm_asid_flush_vcpu(v); -} - -#endif /* __ASM_X86_HVM_SVM_ASID_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/svm/emulate.h b/xen/include/asm-x86/hvm/svm/emulate.h deleted file mode 100644 index eb1a8c24af..0000000000 --- a/xen/include/asm-x86/hvm/svm/emulate.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * emulate.h: SVM instruction emulation bits. - * Copyright (c) 2005, AMD Corporation. - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef __ASM_X86_HVM_SVM_EMULATE_H__ -#define __ASM_X86_HVM_SVM_EMULATE_H__ - -/* - * Encoding for svm_get_insn_len(). We take X86EMUL_OPC() for the main - * opcode, shifted left to make room for the ModRM byte. - * - * The Grp7 instructions have their ModRM byte expressed in octal for easier - * cross referencing with the opcode extension table. - */ -#define INSTR_ENC(opc, modrm) (((opc) << 8) | (modrm)) - -#define INSTR_PAUSE INSTR_ENC(X86EMUL_OPC_F3(0, 0x90), 0) -#define INSTR_INT3 INSTR_ENC(X86EMUL_OPC( 0, 0xcc), 0) -#define INSTR_ICEBP INSTR_ENC(X86EMUL_OPC( 0, 0xf1), 0) -#define INSTR_HLT INSTR_ENC(X86EMUL_OPC( 0, 0xf4), 0) -#define INSTR_XSETBV INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0321) -#define INSTR_VMRUN INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0330) -#define INSTR_VMCALL INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0331) -#define INSTR_VMLOAD INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0332) -#define INSTR_VMSAVE INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0333) -#define INSTR_STGI INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0334) -#define INSTR_CLGI INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0335) -#define INSTR_INVLPGA INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0337) -#define INSTR_RDTSCP INSTR_ENC(X86EMUL_OPC(0x0f, 0x01), 0371) -#define INSTR_INVD INSTR_ENC(X86EMUL_OPC(0x0f, 0x08), 0) -#define INSTR_WBINVD INSTR_ENC(X86EMUL_OPC(0x0f, 0x09), 0) -#define INSTR_WRMSR INSTR_ENC(X86EMUL_OPC(0x0f, 0x30), 0) -#define INSTR_RDTSC INSTR_ENC(X86EMUL_OPC(0x0f, 0x31), 0) -#define INSTR_RDMSR INSTR_ENC(X86EMUL_OPC(0x0f, 0x32), 0) -#define INSTR_CPUID INSTR_ENC(X86EMUL_OPC(0x0f, 0xa2), 0) - -struct vcpu; - -unsigned int svm_get_insn_len(struct vcpu *v, unsigned int instr_enc); -unsigned int svm_get_task_switch_insn_len(void); - -#endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/svm/intr.h b/xen/include/asm-x86/hvm/svm/intr.h deleted file mode 100644 index ae52d9f948..0000000000 --- a/xen/include/asm-x86/hvm/svm/intr.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * intr.h: SVM Architecture related definitions - * Copyright (c) 2005, AMD Corporation. - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ - -#ifndef __ASM_X86_HVM_SVM_INTR_H__ -#define __ASM_X86_HVM_SVM_INTR_H__ - -void svm_intr_assist(void); - -#endif /* __ASM_X86_HVM_SVM_INTR_H__ */ diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h deleted file mode 100644 index 0873698457..0000000000 --- a/xen/include/asm-x86/hvm/svm/nestedsvm.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * nestedsvm.h: Nested Virtualization - * Copyright (c) 2011, Advanced Micro Devices, Inc - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
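Unpacking the octal ModRM convention from emulate.h above: 0321 octal is 0xd1, i.e. ModRM mod=3, reg=2, rm=1, which is the encoding of XSETBV. Illustratively, inside any function:

    /* 0321 (octal) == 0xd1 == ModRM{mod=3, reg=2, rm=1} for XSETBV. */
    BUILD_BUG_ON(INSTR_XSETBV !=
                 ((X86EMUL_OPC(0x0f, 0x01) << 8) | 0xd1));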
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ -#ifndef __ASM_X86_HVM_SVM_NESTEDSVM_H__ -#define __ASM_X86_HVM_SVM_NESTEDSVM_H__ - -#include -#include - -/* SVM specific intblk types, cannot be an enum because gcc 4.5 complains */ -/* GIF cleared */ -#define hvm_intblk_svm_gif hvm_intblk_arch - -struct nestedsvm { - bool_t ns_gif; - uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */ - - /* l1 guest physical address of the virtual vmcb used by the prior VMRUN. - * Needed for VMCB Cleanbit emulation. - */ - uint64_t ns_ovvmcb_pa; - - /* virtual tscratio holding the value the l1 guest writes to the - * MSR_AMD64_TSC_RATIO MSR. - */ - uint64_t ns_tscratio; - - /* Cached real intercepts of the l2 guest */ - uint32_t ns_cr_intercepts; - uint32_t ns_dr_intercepts; - uint32_t ns_exception_intercepts; - uint32_t ns_general1_intercepts; - uint32_t ns_general2_intercepts; - - /* Cached real lbr and other virtual extensions of the l2 guest */ - virt_ext_t ns_virt_ext; - - /* Cached real MSR permission bitmaps of the l2 guest */ - unsigned long *ns_cached_msrpm; - /* Merged MSR permission bitmap */ - unsigned long *ns_merged_msrpm; - - /* guest physical address of the virtual io permission map */ - paddr_t ns_iomap_pa, ns_oiomap_pa; - /* Shadow io permission map */ - unsigned long *ns_iomap; - - uint64_t ns_cr0; /* Cached guest_cr[0] of the l1 guest while the l2 guest runs. - * Needed to handle FPU context switching */ - - /* Cache the guest cr3/host cr3 that the l1 guest sets up for the l2 guest. - * Used by Shadow-on-Shadow and Nested-on-Nested.
- * ns_vmcb_guestcr3: in l2 guest physical address space and points to - * the l2 guest page table - * ns_vmcb_hostcr3: in l1 guest physical address space and points to - * the l1 guest nested page table - */ - uint64_t ns_vmcb_guestcr3, ns_vmcb_hostcr3; - uint32_t ns_guest_asid; - - bool_t ns_hap_enabled; - - /* Only meaningful when vmexit_pending flag is set */ - struct { - uint64_t exitcode; /* native exitcode to inject into l1 guest */ - uint64_t exitinfo1; /* additional information to the exitcode */ - uint64_t exitinfo2; /* additional information to the exitcode */ - } ns_vmexit; - union { - uint32_t bytes; - struct { - uint32_t rflagsif: 1; - uint32_t vintrmask: 1; - uint32_t reserved: 30; - } fields; - } ns_hostflags; -}; - -#define vcpu_nestedsvm(v) (vcpu_nestedhvm(v).u.nsvm) - -/* True when l1 guest enabled SVM in EFER */ -#define nsvm_efer_svm_enabled(v) \ - (!!((v)->arch.hvm.guest_efer & EFER_SVME)) - -int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr); -void nestedsvm_vmexit_defer(struct vcpu *v, - uint64_t exitcode, uint64_t exitinfo1, uint64_t exitinfo2); -enum nestedhvm_vmexits -nestedsvm_vmexit_n2n1(struct vcpu *v, struct cpu_user_regs *regs); -enum nestedhvm_vmexits -nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs, - uint64_t exitcode); -void svm_nested_features_on_efer_update(struct vcpu *v); - -/* Interface methods */ -void nsvm_vcpu_destroy(struct vcpu *v); -int nsvm_vcpu_initialise(struct vcpu *v); -int nsvm_vcpu_reset(struct vcpu *v); -int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs); -int nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *event); -uint64_t nsvm_vcpu_hostcr3(struct vcpu *v); -bool_t nsvm_vmcb_guest_intercepts_event( - struct vcpu *v, unsigned int vector, int errcode); -bool_t nsvm_vmcb_hap_enabled(struct vcpu *v); -enum hvm_intblk nsvm_intr_blocked(struct vcpu *v); - -/* Interrupts, vGIF */ -void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v); -void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v); -bool_t nestedsvm_gif_isset(struct vcpu *v); -int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x); - -#define NSVM_INTR_NOTHANDLED 3 -#define NSVM_INTR_NOTINTERCEPTED 2 -#define NSVM_INTR_FORCEVMEXIT 1 -#define NSVM_INTR_MASKED 0 -int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack); - -#endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h deleted file mode 100644 index 05e9685026..0000000000 --- a/xen/include/asm-x86/hvm/svm/svm.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * svm.h: SVM Architecture related definitions - * Copyright (c) 2005, AMD Corporation. - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ - -#ifndef __ASM_X86_HVM_SVM_H__ -#define __ASM_X86_HVM_SVM_H__ - -#include - -static inline void svm_vmload_pa(paddr_t vmcb) -{ - asm volatile ( - ".byte 0x0f,0x01,0xda" /* vmload */ - : : "a" (vmcb) : "memory" ); -} - -static inline void svm_vmsave_pa(paddr_t vmcb) -{ - asm volatile ( - ".byte 0x0f,0x01,0xdb" /* vmsave */ - : : "a" (vmcb) : "memory" ); -} - -static inline void svm_invlpga(unsigned long linear, uint32_t asid) -{ - asm volatile ( - ".byte 0x0f,0x01,0xdf" - : /* output */ - : /* input */ - "a" (linear), "c" (asid)); -} - -unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr); -void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len); -void svm_update_guest_cr(struct vcpu *, unsigned int cr, unsigned int flags); - -/* - * PV context switch helpers. Prefetching the VMCB area itself has been shown - * to be useful for performance. - * - * Must only be used for NUL FS/GS, as the segment attributes/limits are not - * read from the GDT/LDT. - */ -void svm_load_segs_prefetch(void); -bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base, - unsigned long fs_base, unsigned long gs_base, - unsigned long gs_shadow); - -extern u32 svm_feature_flags; - -#define SVM_FEATURE_NPT 0 /* Nested page table support */ -#define SVM_FEATURE_LBRV 1 /* LBR virtualization support */ -#define SVM_FEATURE_SVML 2 /* SVM locking MSR support */ -#define SVM_FEATURE_NRIPS 3 /* Next RIP save on VMEXIT support */ -#define SVM_FEATURE_TSCRATEMSR 4 /* TSC ratio MSR support */ -#define SVM_FEATURE_VMCBCLEAN 5 /* VMCB clean bits support */ -#define SVM_FEATURE_FLUSHBYASID 6 /* TLB flush by ASID support */ -#define SVM_FEATURE_DECODEASSISTS 7 /* Decode assists support */ -#define SVM_FEATURE_PAUSEFILTER 10 /* Pause intercept filter support */ -#define SVM_FEATURE_PAUSETHRESH 12 /* Pause intercept filter support */ -#define SVM_FEATURE_VLOADSAVE 15 /* virtual vmload/vmsave */ -#define SVM_FEATURE_VGIF 16 /* Virtual GIF */ -#define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */ -#define SVM_FEATURE_SPEC_CTRL 20 /* MSR_SPEC_CTRL virtualisation */ - -#define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f))) -#define cpu_has_svm_npt cpu_has_svm_feature(SVM_FEATURE_NPT) -#define cpu_has_svm_lbrv cpu_has_svm_feature(SVM_FEATURE_LBRV) -#define cpu_has_svm_svml cpu_has_svm_feature(SVM_FEATURE_SVML) -#define cpu_has_svm_nrips cpu_has_svm_feature(SVM_FEATURE_NRIPS) -#define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN) -#define cpu_has_svm_flushbyasid cpu_has_svm_feature(SVM_FEATURE_FLUSHBYASID) -#define cpu_has_svm_decode cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS) -#define cpu_has_svm_vgif cpu_has_svm_feature(SVM_FEATURE_VGIF) -#define cpu_has_pause_filter cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER) -#define cpu_has_pause_thresh cpu_has_svm_feature(SVM_FEATURE_PAUSETHRESH) -#define cpu_has_tsc_ratio cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR) -#define cpu_has_svm_vloadsave cpu_has_svm_feature(SVM_FEATURE_VLOADSAVE) -#define cpu_has_svm_sss cpu_has_svm_feature(SVM_FEATURE_SSS) -#define cpu_has_svm_spec_ctrl cpu_has_svm_feature(SVM_FEATURE_SPEC_CTRL) - -#define SVM_PAUSEFILTER_INIT 4000 -#define SVM_PAUSETHRESH_INIT 1000 - -/* TSC rate */ -#define DEFAULT_TSC_RATIO 0x0000000100000000ULL -#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL - -/* EXITINFO1 fields on NPT faults */ -#define _NPT_PFEC_with_gla 32 -#define 
NPT_PFEC_with_gla (1UL<<_NPT_PFEC_with_gla) -#define _NPT_PFEC_in_gpt 33 -#define NPT_PFEC_in_gpt (1UL<<_NPT_PFEC_in_gpt) - -#endif /* __ASM_X86_HVM_SVM_H__ */ diff --git a/xen/include/asm-x86/hvm/svm/svmdebug.h b/xen/include/asm-x86/hvm/svm/svmdebug.h deleted file mode 100644 index 330c1d91aa..0000000000 --- a/xen/include/asm-x86/hvm/svm/svmdebug.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * svmdebug.h: SVM-related debug definitions - * Copyright (c) 2011, AMD Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ - -#ifndef __ASM_X86_HVM_SVM_SVMDEBUG_H__ -#define __ASM_X86_HVM_SVM_SVMDEBUG_H__ - -#include -#include - -void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state); -void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb); -bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb, - const struct vcpu *v, bool verbose); - -#endif /* __ASM_X86_HVM_SVM_SVMDEBUG_H__ */ diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h deleted file mode 100644 index ed7cebea71..0000000000 --- a/xen/include/asm-x86/hvm/svm/vmcb.h +++ /dev/null @@ -1,664 +0,0 @@ -/* - * vmcb.h: VMCB-related definitions - * Copyright (c) 2005-2007, Advanced Micro Devices, Inc - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see .
- * - */ -#ifndef __ASM_X86_HVM_SVM_VMCB_H__ -#define __ASM_X86_HVM_SVM_VMCB_H__ - -#include - -/* general 1 intercepts */ -enum GenericIntercept1bits -{ - GENERAL1_INTERCEPT_INTR = 1 << 0, - GENERAL1_INTERCEPT_NMI = 1 << 1, - GENERAL1_INTERCEPT_SMI = 1 << 2, - GENERAL1_INTERCEPT_INIT = 1 << 3, - GENERAL1_INTERCEPT_VINTR = 1 << 4, - GENERAL1_INTERCEPT_CR0_SEL_WRITE = 1 << 5, - GENERAL1_INTERCEPT_IDTR_READ = 1 << 6, - GENERAL1_INTERCEPT_GDTR_READ = 1 << 7, - GENERAL1_INTERCEPT_LDTR_READ = 1 << 8, - GENERAL1_INTERCEPT_TR_READ = 1 << 9, - GENERAL1_INTERCEPT_IDTR_WRITE = 1 << 10, - GENERAL1_INTERCEPT_GDTR_WRITE = 1 << 11, - GENERAL1_INTERCEPT_LDTR_WRITE = 1 << 12, - GENERAL1_INTERCEPT_TR_WRITE = 1 << 13, - GENERAL1_INTERCEPT_RDTSC = 1 << 14, - GENERAL1_INTERCEPT_RDPMC = 1 << 15, - GENERAL1_INTERCEPT_PUSHF = 1 << 16, - GENERAL1_INTERCEPT_POPF = 1 << 17, - GENERAL1_INTERCEPT_CPUID = 1 << 18, - GENERAL1_INTERCEPT_RSM = 1 << 19, - GENERAL1_INTERCEPT_IRET = 1 << 20, - GENERAL1_INTERCEPT_SWINT = 1 << 21, - GENERAL1_INTERCEPT_INVD = 1 << 22, - GENERAL1_INTERCEPT_PAUSE = 1 << 23, - GENERAL1_INTERCEPT_HLT = 1 << 24, - GENERAL1_INTERCEPT_INVLPG = 1 << 25, - GENERAL1_INTERCEPT_INVLPGA = 1 << 26, - GENERAL1_INTERCEPT_IOIO_PROT = 1 << 27, - GENERAL1_INTERCEPT_MSR_PROT = 1 << 28, - GENERAL1_INTERCEPT_TASK_SWITCH = 1 << 29, - GENERAL1_INTERCEPT_FERR_FREEZE = 1 << 30, - GENERAL1_INTERCEPT_SHUTDOWN_EVT = 1u << 31 -}; - -/* general 2 intercepts */ -enum GenericIntercept2bits -{ - GENERAL2_INTERCEPT_VMRUN = 1 << 0, - GENERAL2_INTERCEPT_VMMCALL = 1 << 1, - GENERAL2_INTERCEPT_VMLOAD = 1 << 2, - GENERAL2_INTERCEPT_VMSAVE = 1 << 3, - GENERAL2_INTERCEPT_STGI = 1 << 4, - GENERAL2_INTERCEPT_CLGI = 1 << 5, - GENERAL2_INTERCEPT_SKINIT = 1 << 6, - GENERAL2_INTERCEPT_RDTSCP = 1 << 7, - GENERAL2_INTERCEPT_ICEBP = 1 << 8, - GENERAL2_INTERCEPT_WBINVD = 1 << 9, - GENERAL2_INTERCEPT_MONITOR = 1 << 10, - GENERAL2_INTERCEPT_MWAIT = 1 << 11, - GENERAL2_INTERCEPT_MWAIT_CONDITIONAL = 1 << 12, - GENERAL2_INTERCEPT_XSETBV = 1 << 13, - GENERAL2_INTERCEPT_RDPRU = 1 << 14, -}; - - -/* control register intercepts */ -enum CRInterceptBits -{ - CR_INTERCEPT_CR0_READ = 1 << 0, - CR_INTERCEPT_CR1_READ = 1 << 1, - CR_INTERCEPT_CR2_READ = 1 << 2, - CR_INTERCEPT_CR3_READ = 1 << 3, - CR_INTERCEPT_CR4_READ = 1 << 4, - CR_INTERCEPT_CR5_READ = 1 << 5, - CR_INTERCEPT_CR6_READ = 1 << 6, - CR_INTERCEPT_CR7_READ = 1 << 7, - CR_INTERCEPT_CR8_READ = 1 << 8, - CR_INTERCEPT_CR9_READ = 1 << 9, - CR_INTERCEPT_CR10_READ = 1 << 10, - CR_INTERCEPT_CR11_READ = 1 << 11, - CR_INTERCEPT_CR12_READ = 1 << 12, - CR_INTERCEPT_CR13_READ = 1 << 13, - CR_INTERCEPT_CR14_READ = 1 << 14, - CR_INTERCEPT_CR15_READ = 1 << 15, - CR_INTERCEPT_CR0_WRITE = 1 << 16, - CR_INTERCEPT_CR1_WRITE = 1 << 17, - CR_INTERCEPT_CR2_WRITE = 1 << 18, - CR_INTERCEPT_CR3_WRITE = 1 << 19, - CR_INTERCEPT_CR4_WRITE = 1 << 20, - CR_INTERCEPT_CR5_WRITE = 1 << 21, - CR_INTERCEPT_CR6_WRITE = 1 << 22, - CR_INTERCEPT_CR7_WRITE = 1 << 23, - CR_INTERCEPT_CR8_WRITE = 1 << 24, - CR_INTERCEPT_CR9_WRITE = 1 << 25, - CR_INTERCEPT_CR10_WRITE = 1 << 26, - CR_INTERCEPT_CR11_WRITE = 1 << 27, - CR_INTERCEPT_CR12_WRITE = 1 << 28, - CR_INTERCEPT_CR13_WRITE = 1 << 29, - CR_INTERCEPT_CR14_WRITE = 1 << 30, - CR_INTERCEPT_CR15_WRITE = 1u << 31, -}; - - -/* debug register intercepts */ -enum DRInterceptBits -{ - DR_INTERCEPT_DR0_READ = 1 << 0, - DR_INTERCEPT_DR1_READ = 1 << 1, - DR_INTERCEPT_DR2_READ = 1 << 2, - DR_INTERCEPT_DR3_READ = 1 << 3, - DR_INTERCEPT_DR4_READ = 1 << 4, - DR_INTERCEPT_DR5_READ = 1 << 5, - 
DR_INTERCEPT_DR6_READ = 1 << 6, - DR_INTERCEPT_DR7_READ = 1 << 7, - DR_INTERCEPT_DR8_READ = 1 << 8, - DR_INTERCEPT_DR9_READ = 1 << 9, - DR_INTERCEPT_DR10_READ = 1 << 10, - DR_INTERCEPT_DR11_READ = 1 << 11, - DR_INTERCEPT_DR12_READ = 1 << 12, - DR_INTERCEPT_DR13_READ = 1 << 13, - DR_INTERCEPT_DR14_READ = 1 << 14, - DR_INTERCEPT_DR15_READ = 1 << 15, - DR_INTERCEPT_DR0_WRITE = 1 << 16, - DR_INTERCEPT_DR1_WRITE = 1 << 17, - DR_INTERCEPT_DR2_WRITE = 1 << 18, - DR_INTERCEPT_DR3_WRITE = 1 << 19, - DR_INTERCEPT_DR4_WRITE = 1 << 20, - DR_INTERCEPT_DR5_WRITE = 1 << 21, - DR_INTERCEPT_DR6_WRITE = 1 << 22, - DR_INTERCEPT_DR7_WRITE = 1 << 23, - DR_INTERCEPT_DR8_WRITE = 1 << 24, - DR_INTERCEPT_DR9_WRITE = 1 << 25, - DR_INTERCEPT_DR10_WRITE = 1 << 26, - DR_INTERCEPT_DR11_WRITE = 1 << 27, - DR_INTERCEPT_DR12_WRITE = 1 << 28, - DR_INTERCEPT_DR13_WRITE = 1 << 29, - DR_INTERCEPT_DR14_WRITE = 1 << 30, - DR_INTERCEPT_DR15_WRITE = 1u << 31, -}; - -enum VMEXIT_EXITCODE -{ - /* control register read exitcodes */ - VMEXIT_CR0_READ = 0, /* 0x0 */ - VMEXIT_CR1_READ = 1, /* 0x1 */ - VMEXIT_CR2_READ = 2, /* 0x2 */ - VMEXIT_CR3_READ = 3, /* 0x3 */ - VMEXIT_CR4_READ = 4, /* 0x4 */ - VMEXIT_CR5_READ = 5, /* 0x5 */ - VMEXIT_CR6_READ = 6, /* 0x6 */ - VMEXIT_CR7_READ = 7, /* 0x7 */ - VMEXIT_CR8_READ = 8, /* 0x8 */ - VMEXIT_CR9_READ = 9, /* 0x9 */ - VMEXIT_CR10_READ = 10, /* 0xa */ - VMEXIT_CR11_READ = 11, /* 0xb */ - VMEXIT_CR12_READ = 12, /* 0xc */ - VMEXIT_CR13_READ = 13, /* 0xd */ - VMEXIT_CR14_READ = 14, /* 0xe */ - VMEXIT_CR15_READ = 15, /* 0xf */ - - /* control register write exitcodes */ - VMEXIT_CR0_WRITE = 16, /* 0x10 */ - VMEXIT_CR1_WRITE = 17, /* 0x11 */ - VMEXIT_CR2_WRITE = 18, /* 0x12 */ - VMEXIT_CR3_WRITE = 19, /* 0x13 */ - VMEXIT_CR4_WRITE = 20, /* 0x14 */ - VMEXIT_CR5_WRITE = 21, /* 0x15 */ - VMEXIT_CR6_WRITE = 22, /* 0x16 */ - VMEXIT_CR7_WRITE = 23, /* 0x17 */ - VMEXIT_CR8_WRITE = 24, /* 0x18 */ - VMEXIT_CR9_WRITE = 25, /* 0x19 */ - VMEXIT_CR10_WRITE = 26, /* 0x1a */ - VMEXIT_CR11_WRITE = 27, /* 0x1b */ - VMEXIT_CR12_WRITE = 28, /* 0x1c */ - VMEXIT_CR13_WRITE = 29, /* 0x1d */ - VMEXIT_CR14_WRITE = 30, /* 0x1e */ - VMEXIT_CR15_WRITE = 31, /* 0x1f */ - - /* debug register read exitcodes */ - VMEXIT_DR0_READ = 32, /* 0x20 */ - VMEXIT_DR1_READ = 33, /* 0x21 */ - VMEXIT_DR2_READ = 34, /* 0x22 */ - VMEXIT_DR3_READ = 35, /* 0x23 */ - VMEXIT_DR4_READ = 36, /* 0x24 */ - VMEXIT_DR5_READ = 37, /* 0x25 */ - VMEXIT_DR6_READ = 38, /* 0x26 */ - VMEXIT_DR7_READ = 39, /* 0x27 */ - VMEXIT_DR8_READ = 40, /* 0x28 */ - VMEXIT_DR9_READ = 41, /* 0x29 */ - VMEXIT_DR10_READ = 42, /* 0x2a */ - VMEXIT_DR11_READ = 43, /* 0x2b */ - VMEXIT_DR12_READ = 44, /* 0x2c */ - VMEXIT_DR13_READ = 45, /* 0x2d */ - VMEXIT_DR14_READ = 46, /* 0x2e */ - VMEXIT_DR15_READ = 47, /* 0x2f */ - - /* debug register write exitcodes */ - VMEXIT_DR0_WRITE = 48, /* 0x30 */ - VMEXIT_DR1_WRITE = 49, /* 0x31 */ - VMEXIT_DR2_WRITE = 50, /* 0x32 */ - VMEXIT_DR3_WRITE = 51, /* 0x33 */ - VMEXIT_DR4_WRITE = 52, /* 0x34 */ - VMEXIT_DR5_WRITE = 53, /* 0x35 */ - VMEXIT_DR6_WRITE = 54, /* 0x36 */ - VMEXIT_DR7_WRITE = 55, /* 0x37 */ - VMEXIT_DR8_WRITE = 56, /* 0x38 */ - VMEXIT_DR9_WRITE = 57, /* 0x39 */ - VMEXIT_DR10_WRITE = 58, /* 0x3a */ - VMEXIT_DR11_WRITE = 59, /* 0x3b */ - VMEXIT_DR12_WRITE = 60, /* 0x3c */ - VMEXIT_DR13_WRITE = 61, /* 0x3d */ - VMEXIT_DR14_WRITE = 62, /* 0x3e */ - VMEXIT_DR15_WRITE = 63, /* 0x3f */ - - /* processor exception exitcodes (VMEXIT_EXCP[0-31]) */ - VMEXIT_EXCEPTION_DE = 64, /* 0x40, divide-by-zero-error */ - VMEXIT_EXCEPTION_DB = 65, /* 
0x41, debug */ - VMEXIT_EXCEPTION_NMI = 66, /* 0x42, non-maskable-interrupt */ - VMEXIT_EXCEPTION_BP = 67, /* 0x43, breakpoint */ - VMEXIT_EXCEPTION_OF = 68, /* 0x44, overflow */ - VMEXIT_EXCEPTION_BR = 69, /* 0x45, bound-range */ - VMEXIT_EXCEPTION_UD = 70, /* 0x46, invalid-opcode*/ - VMEXIT_EXCEPTION_NM = 71, /* 0x47, device-not-available */ - VMEXIT_EXCEPTION_DF = 72, /* 0x48, double-fault */ - VMEXIT_EXCEPTION_09 = 73, /* 0x49, unsupported (reserved) */ - VMEXIT_EXCEPTION_TS = 74, /* 0x4a, invalid-tss */ - VMEXIT_EXCEPTION_NP = 75, /* 0x4b, segment-not-present */ - VMEXIT_EXCEPTION_SS = 76, /* 0x4c, stack */ - VMEXIT_EXCEPTION_GP = 77, /* 0x4d, general-protection */ - VMEXIT_EXCEPTION_PF = 78, /* 0x4e, page-fault */ - VMEXIT_EXCEPTION_15 = 79, /* 0x4f, reserved */ - VMEXIT_EXCEPTION_MF = 80, /* 0x50, x87 floating-point exception-pending */ - VMEXIT_EXCEPTION_AC = 81, /* 0x51, alignment-check */ - VMEXIT_EXCEPTION_MC = 82, /* 0x52, machine-check */ - VMEXIT_EXCEPTION_XF = 83, /* 0x53, simd floating-point */ -/* VMEXIT_EXCEPTION_20 = 84, 0x54, #VE (Intel specific) */ - VMEXIT_EXCEPTION_CP = 85, /* 0x55, controlflow protection */ - - /* exceptions 20-31 (exitcodes 84-95) are reserved */ - - /* ...and the rest of the #VMEXITs */ - VMEXIT_INTR = 96, /* 0x60 */ - VMEXIT_NMI = 97, /* 0x61 */ - VMEXIT_SMI = 98, /* 0x62 */ - VMEXIT_INIT = 99, /* 0x63 */ - VMEXIT_VINTR = 100, /* 0x64 */ - VMEXIT_CR0_SEL_WRITE = 101, /* 0x65 */ - VMEXIT_IDTR_READ = 102, /* 0x66 */ - VMEXIT_GDTR_READ = 103, /* 0x67 */ - VMEXIT_LDTR_READ = 104, /* 0x68 */ - VMEXIT_TR_READ = 105, /* 0x69 */ - VMEXIT_IDTR_WRITE = 106, /* 0x6a */ - VMEXIT_GDTR_WRITE = 107, /* 0x6b */ - VMEXIT_LDTR_WRITE = 108, /* 0x6c */ - VMEXIT_TR_WRITE = 109, /* 0x6d */ - VMEXIT_RDTSC = 110, /* 0x6e */ - VMEXIT_RDPMC = 111, /* 0x6f */ - VMEXIT_PUSHF = 112, /* 0x70 */ - VMEXIT_POPF = 113, /* 0x71 */ - VMEXIT_CPUID = 114, /* 0x72 */ - VMEXIT_RSM = 115, /* 0x73 */ - VMEXIT_IRET = 116, /* 0x74 */ - VMEXIT_SWINT = 117, /* 0x75 */ - VMEXIT_INVD = 118, /* 0x76 */ - VMEXIT_PAUSE = 119, /* 0x77 */ - VMEXIT_HLT = 120, /* 0x78 */ - VMEXIT_INVLPG = 121, /* 0x79 */ - VMEXIT_INVLPGA = 122, /* 0x7a */ - VMEXIT_IOIO = 123, /* 0x7b */ - VMEXIT_MSR = 124, /* 0x7c */ - VMEXIT_TASK_SWITCH = 125, /* 0x7d */ - VMEXIT_FERR_FREEZE = 126, /* 0x7e */ - VMEXIT_SHUTDOWN = 127, /* 0x7f */ - VMEXIT_VMRUN = 128, /* 0x80 */ - VMEXIT_VMMCALL = 129, /* 0x81 */ - VMEXIT_VMLOAD = 130, /* 0x82 */ - VMEXIT_VMSAVE = 131, /* 0x83 */ - VMEXIT_STGI = 132, /* 0x84 */ - VMEXIT_CLGI = 133, /* 0x85 */ - VMEXIT_SKINIT = 134, /* 0x86 */ - VMEXIT_RDTSCP = 135, /* 0x87 */ - VMEXIT_ICEBP = 136, /* 0x88 */ - VMEXIT_WBINVD = 137, /* 0x89 */ - VMEXIT_MONITOR = 138, /* 0x8a */ - VMEXIT_MWAIT = 139, /* 0x8b */ - VMEXIT_MWAIT_CONDITIONAL= 140, /* 0x8c */ - VMEXIT_XSETBV = 141, /* 0x8d */ - VMEXIT_RDPRU = 142, /* 0x8e */ - VMEXIT_NPF = 1024, /* 0x400, nested paging fault */ - VMEXIT_INVALID = -1 -}; - -enum -{ - /* Available on all SVM-capable hardware. */ - TLB_CTRL_NO_FLUSH = 0, - TLB_CTRL_FLUSH_ALL = 1, - - /* Available with the FlushByASID feature. 
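 *
 * [Editorial aside, not part of the original patch.]  tlb_control is a
 * plain byte in the VMCB (offset 0x5C in struct vmcb_struct below) and
 * is consumed by the next VMRUN.  A hedged sketch of requesting a flush
 * scoped to the guest's ASID when the hardware can do so, with a full
 * flush as the fallback; cpu_has_svm_flushbyasid is assumed here purely
 * for illustration:
 *
 *     vmcb->tlb_control = cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID
 *                                                 : TLB_CTRL_FLUSH_ALL;
 *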
*/ - TLB_CTRL_FLUSH_ASID = 3, - TLB_CTRL_FLUSH_ASID_NONGLOBAL = 7, -}; - -typedef union -{ - struct - { - uint8_t vector; - uint8_t type:3; - bool ev:1; - uint32_t resvd1:19; - bool v:1; - uint32_t ec; - }; - uint64_t raw; -} intinfo_t; - -typedef union { - struct { - bool intr_shadow: 1; - bool guest_intr_mask:1; - }; - uint64_t raw; -} intstat_t; - -typedef union -{ - u64 bytes; - struct - { - u64 tpr: 8; - u64 irq: 1; - u64 vgif: 1; - u64 rsvd0: 6; - u64 prio: 4; - u64 ign_tpr: 1; - u64 rsvd1: 3; - u64 intr_masking: 1; - u64 vgif_enable: 1; - u64 rsvd2: 6; - u64 vector: 8; - u64 rsvd3: 24; - } fields; -} vintr_t; - -typedef union -{ - u64 bytes; - struct - { - u64 type: 1; - u64 rsv0: 1; - u64 str: 1; - u64 rep: 1; - u64 sz8: 1; - u64 sz16: 1; - u64 sz32: 1; - u64 rsv1: 9; - u64 port: 16; - } fields; -} ioio_info_t; - -typedef union -{ - u64 bytes; - struct - { - u64 lbr_enable:1; - u64 vloadsave_enable:1; - } fields; -} virt_ext_t; - -typedef union -{ - struct { - bool intercepts:1; /* 0: cr/dr/exception/general intercepts, - * pause_filter_count, tsc_offset */ - bool iopm:1; /* 1: iopm_base_pa, msrpm_base_pa */ - bool asid:1; /* 2: guest_asid */ - bool tpr:1; /* 3: vintr */ - bool np:1; /* 4: np_enable, h_cr3, g_pat */ - bool cr:1; /* 5: cr0, cr3, cr4, efer */ - bool dr:1; /* 6: dr6, dr7 */ - bool dt:1; /* 7: gdtr, idtr */ - bool seg:1; /* 8: cs, ds, es, ss, cpl */ - bool cr2:1; /* 9: cr2 */ - bool lbr:1; /* 10: debugctlmsr, last{branch,int}{to,from}ip */ - bool :1; - bool cet:1; /* 12: msr_s_set, ssp, msr_isst */ - }; - uint32_t raw; -} vmcbcleanbits_t; - -#define IOPM_SIZE (12 * 1024) -#define MSRPM_SIZE (8 * 1024) - -struct vmcb_struct { - u32 _cr_intercepts; /* offset 0x00 - cleanbit 0 */ - u32 _dr_intercepts; /* offset 0x04 - cleanbit 0 */ - u32 _exception_intercepts; /* offset 0x08 - cleanbit 0 */ - u32 _general1_intercepts; /* offset 0x0C - cleanbit 0 */ - u32 _general2_intercepts; /* offset 0x10 - cleanbit 0 */ - u32 res01[10]; - u16 _pause_filter_thresh; /* offset 0x3C - cleanbit 0 */ - u16 _pause_filter_count; /* offset 0x3E - cleanbit 0 */ - u64 _iopm_base_pa; /* offset 0x40 - cleanbit 1 */ - u64 _msrpm_base_pa; /* offset 0x48 - cleanbit 1 */ - u64 _tsc_offset; /* offset 0x50 - cleanbit 0 */ - u32 _guest_asid; /* offset 0x58 - cleanbit 2 */ - u8 tlb_control; /* offset 0x5C - TLB_CTRL_* */ - u8 res07[3]; - vintr_t _vintr; /* offset 0x60 - cleanbit 3 */ - intstat_t int_stat; /* offset 0x68 */ - u64 exitcode; /* offset 0x70 */ - union { - struct { - uint64_t exitinfo1; /* offset 0x78 */ - uint64_t exitinfo2; /* offset 0x80 */ - }; - union { - struct { - uint16_t sel; - uint64_t :48; - - uint32_t ec; - uint32_t :4; - bool iret:1; - uint32_t :1; - bool jmp:1; - uint32_t :5; - bool ev:1; - uint32_t :3; - bool rf:1; - } task_switch; - } ei; - }; - intinfo_t exit_int_info; /* offset 0x88 */ - union { /* offset 0x90 - cleanbit 4 */ - struct { - bool _np_enable :1; - bool _sev_enable :1; - bool _sev_es_enable :1; - bool _gmet :1; - bool _np_sss :1; - bool _vte :1; - }; - uint64_t _np_ctrl; - }; - u64 res08[2]; - intinfo_t event_inj; /* offset 0xA8 */ - u64 _h_cr3; /* offset 0xB0 - cleanbit 4 */ - virt_ext_t virt_ext; /* offset 0xB8 */ - vmcbcleanbits_t cleanbits; /* offset 0xC0 */ - u32 res09; /* offset 0xC4 */ - u64 nextrip; /* offset 0xC8 */ - u8 guest_ins_len; /* offset 0xD0 */ - u8 guest_ins[15]; /* offset 0xD1 */ - u64 res10a[100]; /* offset 0xE0 pad to save area */ - - union { - struct segment_register sreg[6]; - struct { - struct segment_register es; /* offset 0x400 - cleanbit 
8 */ - struct segment_register cs; /* cleanbit 8 */ - struct segment_register ss; /* cleanbit 8 */ - struct segment_register ds; /* cleanbit 8 */ - struct segment_register fs; - struct segment_register gs; - }; - }; - struct segment_register gdtr; /* cleanbit 7 */ - struct segment_register ldtr; - struct segment_register idtr; /* cleanbit 7 */ - struct segment_register tr; - u64 res10[5]; - u8 res11[3]; - u8 _cpl; /* cleanbit 8 */ - u32 res12; - u64 _efer; /* offset 0x400 + 0xD0 - cleanbit 5 */ - u64 res13[14]; - u64 _cr4; /* offset 0x400 + 0x148 - cleanbit 5 */ - u64 _cr3; /* cleanbit 5 */ - u64 _cr0; /* cleanbit 5 */ - u64 _dr7; /* cleanbit 6 */ - u64 _dr6; /* cleanbit 6 */ - u64 rflags; - u64 rip; - u64 res14[11]; - u64 rsp; - u64 _msr_s_cet; /* offset 0x400 + 0x1E0 - cleanbit 12 */ - u64 _ssp; /* offset 0x400 + 0x1E8 | */ - u64 _msr_isst; /* offset 0x400 + 0x1F0 v */ - u64 rax; - u64 star; - u64 lstar; - u64 cstar; - u64 sfmask; - u64 kerngsbase; - u64 sysenter_cs; - u64 sysenter_esp; - u64 sysenter_eip; - u64 _cr2; /* cleanbit 9 */ - u64 res16[4]; - u64 _g_pat; /* cleanbit 4 */ - u64 _debugctlmsr; /* cleanbit 10 */ - u64 _lastbranchfromip; /* cleanbit 10 */ - u64 _lastbranchtoip; /* cleanbit 10 */ - u64 _lastintfromip; /* cleanbit 10 */ - u64 _lastinttoip; /* cleanbit 10 */ - u64 res17[9]; - u64 spec_ctrl; - u64 res18[291]; -}; - -struct svm_domain { - /* OSVW MSRs */ - union { - uint64_t raw[2]; - struct { - uint64_t length; - uint64_t status; - }; - } osvw; -}; - -/* - * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state. - * Therefore, guest state is in the hardware registers when servicing a - * VMExit. - * - * Immediately after a VMExit, the vmcb is stale, and needs to be brought - * into sync by VMSAVE. If state in the vmcb is modified, a VMLOAD is - * needed before the following VMRUN. - */ -enum vmcb_sync_state { - vmcb_in_sync, - vmcb_needs_vmsave, /* VMCB out of sync (VMSAVE needed)? */ - vmcb_needs_vmload /* VMCB dirty (VMLOAD needed)? */ -}; - -struct svm_vcpu { - struct vmcb_struct *vmcb; - u64 vmcb_pa; - unsigned long *msrpm; - int launch_core; - - uint8_t vmcb_sync_state; /* enum vmcb_sync_state */ - - /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */ - uint8_t cached_insn_len; /* Zero if no cached instruction. */ - - /* Upper four bytes are undefined in the VMCB, therefore we can't - * use the fields in the VMCB. Write a 64bit value and then read a 64bit - * value is fine unless there's a VMRUN/VMEXIT in between which clears - * the upper four bytes. - */ - uint64_t guest_sysenter_cs; - uint64_t guest_sysenter_esp; - uint64_t guest_sysenter_eip; -}; - -struct vmcb_struct *alloc_vmcb(void); -void free_vmcb(struct vmcb_struct *vmcb); - -int svm_create_vmcb(struct vcpu *v); -void svm_destroy_vmcb(struct vcpu *v); - -void setup_vmcb_dump(void); - -#define MSR_INTERCEPT_NONE 0 -#define MSR_INTERCEPT_READ 1 -#define MSR_INTERCEPT_WRITE 2 -#define MSR_INTERCEPT_RW (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ) -void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable); -#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE) -#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW) - -/* - * VMCB accessor functions. 
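 *
 * [Editorial aside, not part of the original patch.]  Every setter
 * produced below also clears the clean bit covering its field, telling
 * the processor that its cached copy of that VMCB area is stale and
 * must be re-read on the next VMRUN.  For instance,
 * VMCB_ACCESSORS(cr0, cr) expands to roughly:
 *
 *     static inline void vmcb_set_cr0(struct vmcb_struct *vmcb,
 *                                     uint64_t value)
 *     {
 *         vmcb->_cr0 = value;
 *         vmcb->cleanbits.cr = false;
 *     }
 *
 *     static inline uint64_t vmcb_get_cr0(const struct vmcb_struct *vmcb)
 *     {
 *         return vmcb->_cr0;
 *     }
 *
 * which is why the underlying fields carry a leading underscore: writing
 * them directly would bypass the clean-bit maintenance.
 *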
- */ - -#define VMCB_ACCESSORS_(name, type, cleanbit) \ -static inline void \ -vmcb_set_ ## name(struct vmcb_struct *vmcb, \ - type value) \ -{ \ - vmcb->_ ## name = value; \ - vmcb->cleanbits.cleanbit = false; \ -} \ -static inline type \ -vmcb_get_ ## name(const struct vmcb_struct *vmcb) \ -{ \ - return vmcb->_ ## name; \ -} - -#define VMCB_ACCESSORS(name, cleanbit) \ - VMCB_ACCESSORS_(name, typeof(alloc_vmcb()->_ ## name), cleanbit) - -VMCB_ACCESSORS(cr_intercepts, intercepts) -VMCB_ACCESSORS(dr_intercepts, intercepts) -VMCB_ACCESSORS(exception_intercepts, intercepts) -VMCB_ACCESSORS(general1_intercepts, intercepts) -VMCB_ACCESSORS(general2_intercepts, intercepts) -VMCB_ACCESSORS(pause_filter_count, intercepts) -VMCB_ACCESSORS(pause_filter_thresh, intercepts) -VMCB_ACCESSORS(tsc_offset, intercepts) -VMCB_ACCESSORS(iopm_base_pa, iopm) -VMCB_ACCESSORS(msrpm_base_pa, iopm) -VMCB_ACCESSORS(guest_asid, asid) -VMCB_ACCESSORS(vintr, tpr) -VMCB_ACCESSORS(np_ctrl, np) -VMCB_ACCESSORS_(np_enable, bool, np) -VMCB_ACCESSORS_(sev_enable, bool, np) -VMCB_ACCESSORS_(sev_es_enable, bool, np) -VMCB_ACCESSORS_(gmet, bool, np) -VMCB_ACCESSORS_(vte, bool, np) -VMCB_ACCESSORS(h_cr3, np) -VMCB_ACCESSORS(g_pat, np) -VMCB_ACCESSORS(cr0, cr) -VMCB_ACCESSORS(cr3, cr) -VMCB_ACCESSORS(cr4, cr) -VMCB_ACCESSORS(efer, cr) -VMCB_ACCESSORS(dr6, dr) -VMCB_ACCESSORS(dr7, dr) -VMCB_ACCESSORS(cpl, seg) -VMCB_ACCESSORS(cr2, cr2) -VMCB_ACCESSORS(debugctlmsr, lbr) -VMCB_ACCESSORS(lastbranchfromip, lbr) -VMCB_ACCESSORS(lastbranchtoip, lbr) -VMCB_ACCESSORS(lastintfromip, lbr) -VMCB_ACCESSORS(lastinttoip, lbr) -VMCB_ACCESSORS(msr_s_cet, cet) -VMCB_ACCESSORS(ssp, cet) -VMCB_ACCESSORS(msr_isst, cet) - -#undef VMCB_ACCESSORS - -#endif /* ASM_X86_HVM_SVM_VMCS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h deleted file mode 100644 index 145b59f6ac..0000000000 --- a/xen/include/asm-x86/hvm/trace.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef __ASM_X86_HVM_TRACE_H__ -#define __ASM_X86_HVM_TRACE_H__ - -#include - -#define DEFAULT_HVM_TRACE_ON 1 -#define DEFAULT_HVM_TRACE_OFF 0 - -#define DEFAULT_HVM_VMSWITCH DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_PF DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_INJECT DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_IO DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_REGACCESS DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_MISC DEFAULT_HVM_TRACE_ON -#define DEFAULT_HVM_INTR DEFAULT_HVM_TRACE_ON - -#define DO_TRC_HVM_VMENTRY DEFAULT_HVM_VMSWITCH -#define DO_TRC_HVM_VMEXIT DEFAULT_HVM_VMSWITCH -#define DO_TRC_HVM_VMEXIT64 DEFAULT_HVM_VMSWITCH -#define DO_TRC_HVM_PF_XEN DEFAULT_HVM_PF -#define DO_TRC_HVM_PF_XEN64 DEFAULT_HVM_PF -#define DO_TRC_HVM_PF_INJECT DEFAULT_HVM_PF -#define DO_TRC_HVM_PF_INJECT64 DEFAULT_HVM_PF -#define DO_TRC_HVM_INJ_EXC DEFAULT_HVM_INJECT -#define DO_TRC_HVM_INJ_VIRQ DEFAULT_HVM_INJECT -#define DO_TRC_HVM_REINJ_VIRQ DEFAULT_HVM_INJECT -#define DO_TRC_HVM_INTR_WINDOW DEFAULT_HVM_INJECT -#define DO_TRC_HVM_IO_READ DEFAULT_HVM_IO -#define DO_TRC_HVM_IO_WRITE DEFAULT_HVM_IO -#define DO_TRC_HVM_CR_READ DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_CR_READ64 DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_CR_WRITE DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_CR_WRITE64 DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_DR_READ DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_DR_WRITE DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_XCR_READ64 DEFAULT_HVM_REGACCESS 
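/*
 * [Editorial aside, not part of the original patch.]  The DO_TRC_HVM_*
 * settings here are compile-time switches: HVMTRACE_ND() below tests
 * "DO_TRC_HVM_ ## evt", so any event defaulted to DEFAULT_HVM_TRACE_OFF
 * compiles away entirely.  As a sketch, a call such as
 *
 *     HVMTRACE_2D(IO_READ, port, bytes);
 *
 * becomes a __trace_var(TRC_HVM_IO_READ, ...) carrying a two-element
 * uint32_t payload, executed only when tracing is live (tb_init_done)
 * and DO_TRC_HVM_IO_READ is 1.
 */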
-#define DO_TRC_HVM_XCR_WRITE64 DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_MSR_READ DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_MSR_WRITE DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_RDTSC DEFAULT_HVM_REGACCESS -#define DO_TRC_HVM_CPUID DEFAULT_HVM_MISC -#define DO_TRC_HVM_INTR DEFAULT_HVM_INTR -#define DO_TRC_HVM_NMI DEFAULT_HVM_INTR -#define DO_TRC_HVM_MCE DEFAULT_HVM_INTR -#define DO_TRC_HVM_SMI DEFAULT_HVM_INTR -#define DO_TRC_HVM_VMMCALL DEFAULT_HVM_MISC -#define DO_TRC_HVM_HLT DEFAULT_HVM_MISC -#define DO_TRC_HVM_INVLPG DEFAULT_HVM_MISC -#define DO_TRC_HVM_INVLPG64 DEFAULT_HVM_MISC -#define DO_TRC_HVM_IO_ASSIST DEFAULT_HVM_MISC -#define DO_TRC_HVM_MMIO_ASSIST DEFAULT_HVM_MISC -#define DO_TRC_HVM_CLTS DEFAULT_HVM_MISC -#define DO_TRC_HVM_LMSW DEFAULT_HVM_MISC -#define DO_TRC_HVM_LMSW64 DEFAULT_HVM_MISC -#define DO_TRC_HVM_REALMODE_EMULATE DEFAULT_HVM_MISC -#define DO_TRC_HVM_TRAP DEFAULT_HVM_MISC -#define DO_TRC_HVM_TRAP_DEBUG DEFAULT_HVM_MISC -#define DO_TRC_HVM_VLAPIC DEFAULT_HVM_MISC - - -#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32) - -#define TRACE_2_LONG_2D(_e, d1, d2, ...) \ - TRACE_4D(_e, d1, d2) -#define TRACE_2_LONG_3D(_e, d1, d2, d3, ...) \ - TRACE_5D(_e, d1, d2, d3) -#define TRACE_2_LONG_4D(_e, d1, d2, d3, d4, ...) \ - TRACE_6D(_e, d1, d2, d3, d4) - -#define HVMTRACE_ND(evt, modifier, cycles, ...) \ - do { \ - if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \ - { \ - uint32_t _d[] = { __VA_ARGS__ }; \ - __trace_var(TRC_HVM_ ## evt | (modifier), cycles, \ - sizeof(_d), sizeof(_d) ? _d : NULL); \ - } \ - } while(0) - -#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \ - HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4, d5, d6) -#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \ - HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4, d5) -#define HVMTRACE_4D(evt, d1, d2, d3, d4) \ - HVMTRACE_ND(evt, 0, 0, d1, d2, d3, d4) -#define HVMTRACE_3D(evt, d1, d2, d3) \ - HVMTRACE_ND(evt, 0, 0, d1, d2, d3) -#define HVMTRACE_2D(evt, d1, d2) \ - HVMTRACE_ND(evt, 0, 0, d1, d2) -#define HVMTRACE_1D(evt, d1) \ - HVMTRACE_ND(evt, 0, 0, d1) -#define HVMTRACE_0D(evt) \ - HVMTRACE_ND(evt, 0, 0) - -#define HVMTRACE_LONG_1D(evt, d1) \ - HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32) -#define HVMTRACE_LONG_2D(evt, d1, d2, ...) \ - HVMTRACE_3D(evt ## 64, d1, d2) -#define HVMTRACE_LONG_3D(evt, d1, d2, d3, ...) \ - HVMTRACE_4D(evt ## 64, d1, d2, d3) -#define HVMTRACE_LONG_4D(evt, d1, d2, d3, d4, ...) \ - HVMTRACE_5D(evt ## 64, d1, d2, d3, d4) - -#endif /* __ASM_X86_HVM_TRACE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h deleted file mode 100644 index 8adf4555c2..0000000000 --- a/xen/include/asm-x86/hvm/vcpu.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - * vcpu.h: HVM per vcpu definitions - * - * Copyright (c) 2005, International Business Machines Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
- */ - -#ifndef __ASM_X86_HVM_VCPU_H__ -#define __ASM_X86_HVM_VCPU_H__ - -#include -#include -#include -#include -#include -#include -#include -#include - -struct hvm_vcpu_asid { - uint64_t generation; - uint32_t asid; -}; - -/* - * We may read or write up to m512 as a number of device-model - * transactions. - */ -struct hvm_mmio_cache { - unsigned long gla; - unsigned int size; - uint8_t dir; - uint8_t buffer[64] __aligned(sizeof(long)); -}; - -struct hvm_vcpu_io { - /* - * HVM emulation: - * Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn. - * The latter is known to be an MMIO frame (not RAM). - * This translation is only valid for accesses as per @mmio_access. - */ - struct npfec mmio_access; - unsigned long mmio_gla; - unsigned long mmio_gpfn; - - /* - * We may need to handle up to 3 distinct memory accesses per - * instruction. - */ - struct hvm_mmio_cache mmio_cache[3]; - unsigned int mmio_cache_count; - - /* For retries we shouldn't re-fetch the instruction. */ - unsigned int mmio_insn_bytes; - unsigned char mmio_insn[16]; - struct hvmemul_cache *cache; - - /* - * For string instruction emulation we need to be able to signal a - * necessary retry through other than function return codes. - */ - bool_t mmio_retry; - - unsigned long msix_unmask_address; - unsigned long msix_snoop_address; - unsigned long msix_snoop_gpa; - - const struct g2m_ioport *g2m_ioport; -}; - -struct nestedvcpu { - bool_t nv_guestmode; /* vcpu in guestmode? */ - void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */ - void *nv_n1vmcx; /* VMCB/VMCS used to run l1 guest */ - void *nv_n2vmcx; /* shadow VMCB/VMCS used to run l2 guest */ - - uint64_t nv_vvmcxaddr; /* l1 guest physical address of nv_vvmcx */ - paddr_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */ - paddr_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */ - - /* SVM/VMX arch specific */ - union { - struct nestedsvm nsvm; - struct nestedvmx nvmx; - } u; - - bool_t nv_flushp2m; /* True, when p2m table must be flushed */ - struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */ - bool stale_np2m; /* True when p2m_base in VMCx02 is no longer valid */ - uint64_t np2m_generation; - - struct hvm_vcpu_asid nv_n2asid; - - bool_t nv_vmentry_pending; - bool_t nv_vmexit_pending; - bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */ - - /* Does l1 guest intercept io ports 0x80 and/or 0xED ? - * Useful to optimize io permission handling. - */ - bool_t nv_ioport80; - bool_t nv_ioportED; - - /* L2's control-resgister, just as the L2 sees them. */ - unsigned long guest_cr[5]; -}; - -#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu) - -struct altp2mvcpu { - /* - * #VE information page. This pointer being non-NULL indicates that a - * VMCS's VIRT_EXCEPTION_INFO field is pointing to the page, and an extra - * page reference is held. - */ - struct page_info *veinfo_pg; - uint16_t p2midx; /* alternate p2m index */ -}; - -#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu) - -struct hvm_vcpu { - /* Guest control-register and EFER values, just as the guest sees them. */ - unsigned long guest_cr[5]; - unsigned long guest_efer; - - /* - * Processor-visible control-register values, while guest executes. - * CR0, CR4: Used as a cache of VMCS contents by VMX only. - * CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2). - * CR3: Always used and kept up to date by paging subsystem. 
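 *
 * [Editorial aside, not part of the original patch.]  The practical
 * upshot is that the same architectural register can hold two values
 * per vCPU, e.g.:
 *
 *     v->arch.hvm.guest_cr[0]   CR0 as the guest believes it to be
 *     v->arch.hvm.hw_cr[0]      CR0 actually in force while the guest
 *                               runs, possibly carrying extra bits the
 *                               hypervisor insists on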
- */ - unsigned long hw_cr[5]; - - struct vlapic vlapic; - s64 cache_tsc_offset; - u64 guest_time; - - /* Lock and list for virtual platform timers. */ - spinlock_t tm_lock; - struct list_head tm_list; - - bool flag_dr_dirty; - bool debug_state_latch; - bool single_step; - struct { - bool enabled; - uint16_t p2midx; - } fast_single_step; - - /* (MFN) hypervisor page table */ - pagetable_t monitor_table; - - struct hvm_vcpu_asid n1asid; - - u64 msr_tsc_adjust; - - union { - struct vmx_vcpu vmx; - struct svm_vcpu svm; - }; - - struct tasklet assert_evtchn_irq_tasklet; - - struct nestedvcpu nvcpu; - - struct altp2mvcpu avcpu; - - struct mtrr_state mtrr; - u64 pat_cr; - - /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */ - int64_t stime_offset; - - u8 evtchn_upcall_vector; - - /* Which cache mode is this VCPU in (CR0:CD/NW)? */ - u8 cache_mode; - - struct hvm_vcpu_io hvm_io; - - /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */ - struct x86_event inject_event; - - struct viridian_vcpu *viridian; -}; - -#endif /* __ASM_X86_HVM_VCPU_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h deleted file mode 100644 index 36b64d20d6..0000000000 --- a/xen/include/asm-x86/hvm/vioapic.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2001 MandrakeSoft S.A. - * - * MandrakeSoft S.A. - * 43, rue d'Aboukir - * 75002 Paris - France - * http://www.linux-mandrake.com/ - * http://www.mandrakesoft.com/ - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; If not, see . - */ - -#ifndef __ASM_X86_HVM_VIOAPIC_H__ -#define __ASM_X86_HVM_VIOAPIC_H__ - -#include -#include - -#define VIOAPIC_VERSION_ID 0x11 /* IOAPIC version */ - -#define VIOAPIC_EDGE_TRIG 0 -#define VIOAPIC_LEVEL_TRIG 1 - -#define VIOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000 -#define VIOAPIC_MEM_LENGTH 0x100 - -/* Direct registers. */ -#define VIOAPIC_REG_SELECT 0x00 -#define VIOAPIC_REG_WINDOW 0x10 -#define VIOAPIC_REG_EOI 0x40 - -/* Indirect registers. 
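 *
 * [Editorial aside, not part of the original patch.]  These are reached
 * through the two direct registers above via the standard IOAPIC
 * select/window protocol: write the indirect register's index to
 * VIOAPIC_REG_SELECT, then access its value through VIOAPIC_REG_WINDOW.
 * A hedged sketch for redirection entry "pin", each 64-bit RTE spanning
 * two consecutive 32-bit indirect registers; write32()/read32() stand
 * in for whichever MMIO accessors the caller has, and base is the
 * vIOAPIC's MMIO base (VIOAPIC_DEFAULT_BASE_ADDRESS by default):
 *
 *     write32(base + VIOAPIC_REG_SELECT, VIOAPIC_REG_RTE0 + 2 * pin);
 *     rte_lo = read32(base + VIOAPIC_REG_WINDOW);
 *     write32(base + VIOAPIC_REG_SELECT, VIOAPIC_REG_RTE0 + 2 * pin + 1);
 *     rte_hi = read32(base + VIOAPIC_REG_WINDOW);
 *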
*/ -#define VIOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ -#define VIOAPIC_REG_VERSION 0x01 -#define VIOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ -#define VIOAPIC_REG_RTE0 0x10 - -struct hvm_vioapic { - struct domain *domain; - uint32_t nr_pins; - unsigned int base_gsi; - union { - XEN_HVM_VIOAPIC(,); - struct hvm_hw_vioapic domU; - }; -}; - -#define domain_vioapic(d, i) ((d)->arch.hvm.vioapic[i]) -#define vioapic_domain(v) ((v)->domain) - -int vioapic_init(struct domain *d); -void vioapic_deinit(struct domain *d); -void vioapic_reset(struct domain *d); -void vioapic_irq_positive_edge(struct domain *d, unsigned int irq); -void vioapic_update_EOI(struct domain *d, u8 vector); - -int vioapic_get_mask(const struct domain *d, unsigned int gsi); -int vioapic_get_vector(const struct domain *d, unsigned int gsi); -int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi); - -#endif /* __ASM_X86_HVM_VIOAPIC_H__ */ diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h deleted file mode 100644 index 4c8ff6e80b..0000000000 --- a/xen/include/asm-x86/hvm/viridian.h +++ /dev/null @@ -1,112 +0,0 @@ -/***************************************************************************** - * - * include/xen/viridian.h - * - * Copyright (c) 2008 Citrix Corp. - * - */ - -#ifndef __ASM_X86_HVM_VIRIDIAN_H__ -#define __ASM_X86_HVM_VIRIDIAN_H__ - -#include - -struct viridian_page -{ - union hv_vp_assist_page_msr msr; - void *ptr; -}; - -struct viridian_stimer { - struct vcpu *v; - struct timer timer; - union hv_stimer_config config; - uint64_t count; - uint64_t expiration; - bool started; -}; - -struct viridian_vcpu -{ - struct viridian_page vp_assist; - bool apic_assist_pending; - bool polled; - uint64_t scontrol; - uint64_t siefp; - struct viridian_page simp; - union hv_synic_sint sint[16]; - uint8_t vector_to_sintx[256]; - struct viridian_stimer stimer[4]; - unsigned int stimer_enabled; - unsigned int stimer_pending; - uint64_t crash_param[5]; -}; - -struct viridian_time_ref_count -{ - unsigned long flags; - -#define _TRC_accessed 0 -#define TRC_accessed (1 << _TRC_accessed) -#define _TRC_running 1 -#define TRC_running (1 << _TRC_running) - - uint64_t val; - int64_t off; -}; - -enum { - _HCALL_spin_wait, - _HCALL_flush, - _HCALL_flush_ex, - _HCALL_ipi, - _HCALL_ipi_ex, - _HCALL_nr /* must be last */ -}; - -struct viridian_domain -{ - union hv_guest_os_id guest_os_id; - union hv_vp_assist_page_msr hypercall_gpa; - DECLARE_BITMAP(hypercall_flags, _HCALL_nr); - struct viridian_time_ref_count time_ref_count; - struct viridian_page reference_tsc; -}; - -void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, - uint32_t subleaf, struct cpuid_leaf *res); - -int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val); -int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val); - -int -viridian_hypercall(struct cpu_user_regs *regs); - -void viridian_time_domain_freeze(const struct domain *d); -void viridian_time_domain_thaw(const struct domain *d); - -int viridian_vcpu_init(struct vcpu *v); -int viridian_domain_init(struct domain *d); - -void viridian_vcpu_deinit(struct vcpu *v); -void viridian_domain_deinit(struct domain *d); - -void viridian_apic_assist_set(const struct vcpu *v); -bool viridian_apic_assist_completed(const struct vcpu *v); -void viridian_apic_assist_clear(const struct vcpu *v); - -void viridian_synic_poll(struct vcpu *v); -bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v, - unsigned int vector); - -#endif /* 
__ASM_X86_HVM_VIRIDIAN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h deleted file mode 100644 index 8f908928c3..0000000000 --- a/xen/include/asm-x86/hvm/vlapic.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * hvm_vlapic.h: virtualize LAPIC definitions. - * - * Copyright (c) 2004, Intel Corporation. - * Copyright (c) 2006 Keir Fraser, XenSource Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_VLAPIC_H__ -#define __ASM_X86_HVM_VLAPIC_H__ - -#include -#include - -#define vcpu_vlapic(x) (&(x)->arch.hvm.vlapic) -#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm.vlapic)) -#define const_vlapic_vcpu(x) (container_of((x), const struct vcpu, \ - arch.hvm.vlapic)) -#define vlapic_domain(x) (vlapic_vcpu(x)->domain) - -#define _VLAPIC_ID(vlapic, id) (vlapic_x2apic_mode(vlapic) \ - ? (id) : GET_xAPIC_ID(id)) -#define VLAPIC_ID(vlapic) _VLAPIC_ID(vlapic, vlapic_get_reg(vlapic, APIC_ID)) - -/* - * APIC can be disabled in two ways: - * 1. 'Hardware disable': via IA32_APIC_BASE_MSR[11] - * CPU should behave as if it does not have an APIC. - * 2. 'Software disable': via APIC_SPIV[8]. - * APIC is visible but does not respond to interrupt messages. - */ -#define VLAPIC_HW_DISABLED 0x1 -#define VLAPIC_SW_DISABLED 0x2 -#define vlapic_sw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_SW_DISABLED) -#define vlapic_hw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_HW_DISABLED) -#define vlapic_disabled(vlapic) ((vlapic)->hw.disabled) -#define vlapic_enabled(vlapic) (!vlapic_disabled(vlapic)) - -#define vlapic_base_address(vlapic) \ - ((vlapic)->hw.apic_base_msr & APIC_BASE_ADDR_MASK) -/* Only check EXTD bit as EXTD can't be set if it is disabled by hardware */ -#define vlapic_x2apic_mode(vlapic) \ - ((vlapic)->hw.apic_base_msr & APIC_BASE_EXTD) -#define vlapic_xapic_mode(vlapic) \ - (!vlapic_hw_disabled(vlapic) && \ - !((vlapic)->hw.apic_base_msr & APIC_BASE_EXTD)) - -/* - * Generic APIC bitmap vector update & search routines. 
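 *
 * [Editorial aside, not part of the original patch.]  The IRR/ISR/TMR
 * bitmaps are spread across eight 32-bit APIC registers spaced 0x10
 * bytes apart, so a vector maps to register offset (vec / 32) * 0x10
 * and bit (vec % 32) - exactly what REG_POS() and VEC_POS() below
 * compute.  Vector 0x31 (49), say, lands at offset 0x10, bit 17, and a
 * typical caller pends it with something like (APIC_IRR being the usual
 * register offset from the APIC definitions header):
 *
 *     vlapic_set_vector(vec, &vlapic->regs->data[APIC_IRR]);
 *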
- */ - -#define VEC_POS(v) ((v) % 32) -#define REG_POS(v) (((v) / 32) * 0x10) -#define vlapic_test_vector(vec, bitmap) \ - test_bit(VEC_POS(vec), (const uint32_t *)((bitmap) + REG_POS(vec))) -#define vlapic_test_and_set_vector(vec, bitmap) \ - test_and_set_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec))) -#define vlapic_test_and_clear_vector(vec, bitmap) \ - test_and_clear_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec))) -#define vlapic_set_vector(vec, bitmap) \ - set_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec))) -#define vlapic_clear_vector(vec, bitmap) \ - clear_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec))) - -struct vlapic { - struct hvm_hw_lapic hw; - struct hvm_hw_lapic_regs *regs; - struct { - bool_t hw, regs; - uint32_t id, ldr; - } loaded; - spinlock_t esr_lock; - struct periodic_time pt; - s_time_t timer_last_update; - struct page_info *regs_page; - /* INIT-SIPI-SIPI work gets deferred to a tasklet. */ - struct { - uint32_t icr, dest; - struct tasklet tasklet; - } init_sipi; -}; - -/* vlapic's frequence is 100 MHz */ -#define APIC_BUS_CYCLE_NS 10 - -static inline uint32_t vlapic_get_reg(const struct vlapic *vlapic, - uint32_t reg) -{ - return *((uint32_t *)(&vlapic->regs->data[reg])); -} - -static inline void vlapic_set_reg( - struct vlapic *vlapic, uint32_t reg, uint32_t val) -{ - *((uint32_t *)(&vlapic->regs->data[reg])) = val; -} - -void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val); - -bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic); - -bool vlapic_test_irq(const struct vlapic *vlapic, uint8_t vec); -void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig); - -int vlapic_has_pending_irq(struct vcpu *v); -int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack); - -int vlapic_init(struct vcpu *v); -void vlapic_destroy(struct vcpu *v); - -void vlapic_reset(struct vlapic *vlapic); - -int guest_wrmsr_apic_base(struct vcpu *v, uint64_t val); -int guest_rdmsr_x2apic(const struct vcpu *v, uint32_t msr, uint64_t *val); -int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t val); - -void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value); -uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic); - -int vlapic_accept_pic_intr(struct vcpu *v); -uint32_t vlapic_set_ppr(struct vlapic *vlapic); - -void vlapic_adjust_i8259_target(struct domain *d); - -void vlapic_EOI_set(struct vlapic *vlapic); -void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector); - -void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high); - -int vlapic_apicv_write(struct vcpu *v, unsigned int offset); - -struct vlapic *vlapic_lowest_prio( - struct domain *d, const struct vlapic *source, - int short_hand, uint32_t dest, bool_t dest_mode); - -bool_t vlapic_match_dest( - const struct vlapic *target, const struct vlapic *source, - int short_hand, uint32_t dest, bool_t dest_mode); - -static inline void vlapic_sync_pir_to_irr(struct vcpu *v) -{ - if ( hvm_funcs.sync_pir_to_irr ) - alternative_vcall(hvm_funcs.sync_pir_to_irr, v); -} - -#endif /* __ASM_X86_HVM_VLAPIC_H__ */ diff --git a/xen/include/asm-x86/hvm/vm_event.h b/xen/include/asm-x86/hvm/vm_event.h deleted file mode 100644 index 28cb07ce8f..0000000000 --- a/xen/include/asm-x86/hvm/vm_event.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * include/asm-x86/hvm/vm_event.h - * - * Hardware virtual machine vm_event abstractions. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_VM_EVENT_H__ -#define __ASM_X86_HVM_VM_EVENT_H__ - -void hvm_vm_event_do_resume(struct vcpu *v); - -#endif /* __ASM_X86_HVM_VM_EVENT_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h deleted file mode 100644 index 03c9ccf627..0000000000 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ /dev/null @@ -1,688 +0,0 @@ -/* - * vmcs.h: VMCS related definitions - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ -#ifndef __ASM_X86_HVM_VMX_VMCS_H__ -#define __ASM_X86_HVM_VMX_VMCS_H__ - -#include - -extern void vmcs_dump_vcpu(struct vcpu *v); -extern int vmx_vmcs_init(void); -extern int vmx_cpu_up_prepare(unsigned int cpu); -extern void vmx_cpu_dead(unsigned int cpu); -extern int vmx_cpu_up(void); -extern void vmx_cpu_down(void); - -struct vmcs_struct { - u32 vmcs_revision_id; - unsigned char data [0]; /* vmcs size is read from MSR */ -}; - -struct vmx_msr_entry { - u32 index; - u32 mbz; - u64 data; -}; - -#define EPT_DEFAULT_MT MTRR_TYPE_WRBACK - -struct ept_data { - union { - struct { - uint64_t mt:3, /* Memory Type. */ - wl:3, /* Walk length -1. */ - ad:1, /* Enable EPT A/D bits. */ - :5, /* rsvd. */ - mfn:52; - }; - u64 eptp; - }; - /* Set of PCPUs needing an INVEPT before a VMENTER. */ - cpumask_var_t invalidate; -}; - -#define _VMX_DOMAIN_PML_ENABLED 0 -#define VMX_DOMAIN_PML_ENABLED (1ul << _VMX_DOMAIN_PML_ENABLED) -struct vmx_domain { - /* VMX_DOMAIN_* */ - unsigned int status; - - /* - * Domain permitted to use Executable EPT Superpages? Cleared to work - * around CVE-2018-12207 as appropriate. 
- */ - bool exec_sp; -}; - -/* - * Layout of the MSR bitmap, as interpreted by hardware: - * - *_low covers MSRs 0 to 0x1fff - * - *_ligh covers MSRs 0xc0000000 to 0xc0001fff - */ -struct vmx_msr_bitmap { - unsigned long read_low [0x2000 / BITS_PER_LONG]; - unsigned long read_high [0x2000 / BITS_PER_LONG]; - unsigned long write_low [0x2000 / BITS_PER_LONG]; - unsigned long write_high[0x2000 / BITS_PER_LONG]; -}; - -struct pi_desc { - DECLARE_BITMAP(pir, X86_NR_VECTORS); - union { - struct { - u16 on : 1, /* bit 256 - Outstanding Notification */ - sn : 1, /* bit 257 - Suppress Notification */ - rsvd_1 : 14; /* bit 271:258 - Reserved */ - u8 nv; /* bit 279:272 - Notification Vector */ - u8 rsvd_2; /* bit 287:280 - Reserved */ - u32 ndst; /* bit 319:288 - Notification Destination */ - }; - u64 control; - }; - u32 rsvd[6]; -} __attribute__ ((aligned (64))); - -#define NR_PML_ENTRIES 512 - -struct pi_blocking_vcpu { - struct list_head list; - spinlock_t *lock; -}; - -struct vmx_vcpu { - /* Physical address of VMCS. */ - paddr_t vmcs_pa; - /* VMCS shadow machine address. */ - paddr_t vmcs_shadow_maddr; - - /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */ - spinlock_t vmcs_lock; - - /* - * Activation and launch status of this VMCS. - * - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR. - * - Launched on active CPU by VMLAUNCH when current VMCS. - */ - struct list_head active_list; - int active_cpu; - int launched; - - /* Cache of cpu execution control. */ - u32 exec_control; - u32 secondary_exec_control; - u32 exception_bitmap; - - uint64_t shadow_gs; - uint64_t star; - uint64_t lstar; - uint64_t cstar; - uint64_t sfmask; - - struct vmx_msr_bitmap *msr_bitmap; - - /* - * Most accesses to the MSR host/guest load/save lists are in current - * context. However, the data can be modified by toolstack/migration - * actions. Remote access is only permitted for paused vcpus, and is - * protected under the domctl lock. - */ - struct vmx_msr_entry *msr_area; - struct vmx_msr_entry *host_msr_area; - unsigned int msr_load_count; - unsigned int msr_save_count; - unsigned int host_msr_count; - - unsigned long eoi_exitmap_changed; - DECLARE_BITMAP(eoi_exit_bitmap, X86_NR_VECTORS); - struct pi_desc pi_desc; - - unsigned long host_cr0; - - /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */ - bool_t ept_spurious_misconfig; - - /* Processor Trace configured and enabled for the vcpu. */ - bool ipt_active; - - /* Is the guest in real mode? */ - uint8_t vmx_realmode; - /* Are we emulating rather than VMENTERing? */ - uint8_t vmx_emulate; - - uint8_t lbr_flags; - - /* Bitmask of segments that we can't safely use in virtual 8086 mode */ - uint16_t vm86_segment_mask; - /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */ - struct segment_register vm86_saved_seg[x86_seg_tr + 1]; - /* Remember EFLAGS while in virtual 8086 mode */ - uint32_t vm86_saved_eflags; - int hostenv_migrated; - - /* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */ - struct page_info *vmread_bitmap; - struct page_info *vmwrite_bitmap; - - struct page_info *pml_pg; - - /* Bitmask of trapped CR4 bits. */ - unsigned long cr4_host_mask; - - /* - * Before it is blocked, vCPU is added to the per-cpu list. - * VT-d engine can send wakeup notification event to the - * pCPU and wakeup the related vCPU. 
- */ - struct pi_blocking_vcpu pi_blocking; -}; - -int vmx_create_vmcs(struct vcpu *v); -void vmx_destroy_vmcs(struct vcpu *v); -void vmx_vmcs_enter(struct vcpu *v); -bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v); -void vmx_vmcs_exit(struct vcpu *v); -void vmx_vmcs_reload(struct vcpu *v); - -#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 -#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 -#define CPU_BASED_HLT_EXITING 0x00000080 -#define CPU_BASED_INVLPG_EXITING 0x00000200 -#define CPU_BASED_MWAIT_EXITING 0x00000400 -#define CPU_BASED_RDPMC_EXITING 0x00000800 -#define CPU_BASED_RDTSC_EXITING 0x00001000 -#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 -#define CPU_BASED_CR3_STORE_EXITING 0x00010000 -#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 -#define CPU_BASED_CR8_STORE_EXITING 0x00100000 -#define CPU_BASED_TPR_SHADOW 0x00200000 -#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 -#define CPU_BASED_MOV_DR_EXITING 0x00800000 -#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 -#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000 -#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 -#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000 -#define CPU_BASED_MONITOR_EXITING 0x20000000 -#define CPU_BASED_PAUSE_EXITING 0x40000000 -#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 -extern u32 vmx_cpu_based_exec_control; - -#define PIN_BASED_EXT_INTR_MASK 0x00000001 -#define PIN_BASED_NMI_EXITING 0x00000008 -#define PIN_BASED_VIRTUAL_NMIS 0x00000020 -#define PIN_BASED_PREEMPT_TIMER 0x00000040 -#define PIN_BASED_POSTED_INTERRUPT 0x00000080 -extern u32 vmx_pin_based_exec_control; - -#define VM_EXIT_SAVE_DEBUG_CNTRLS 0x00000004 -#define VM_EXIT_IA32E_MODE 0x00000200 -#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL 0x00001000 -#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 -#define VM_EXIT_SAVE_GUEST_PAT 0x00040000 -#define VM_EXIT_LOAD_HOST_PAT 0x00080000 -#define VM_EXIT_SAVE_GUEST_EFER 0x00100000 -#define VM_EXIT_LOAD_HOST_EFER 0x00200000 -#define VM_EXIT_SAVE_PREEMPT_TIMER 0x00400000 -#define VM_EXIT_CLEAR_BNDCFGS 0x00800000 -extern u32 vmx_vmexit_control; - -#define VM_ENTRY_IA32E_MODE 0x00000200 -#define VM_ENTRY_SMM 0x00000400 -#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 -#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL 0x00002000 -#define VM_ENTRY_LOAD_GUEST_PAT 0x00004000 -#define VM_ENTRY_LOAD_GUEST_EFER 0x00008000 -#define VM_ENTRY_LOAD_BNDCFGS 0x00010000 -extern u32 vmx_vmentry_control; - -#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 -#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 -#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004 -#define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008 -#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 -#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 -#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 -#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 -#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 -#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 -#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 -#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 -#define SECONDARY_EXEC_ENABLE_VM_FUNCTIONS 0x00002000 -#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING 0x00004000 -#define SECONDARY_EXEC_ENABLE_PML 0x00020000 -#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS 0x00040000 -#define SECONDARY_EXEC_XSAVES 0x00100000 -#define SECONDARY_EXEC_TSC_SCALING 0x02000000 -extern u32 vmx_secondary_exec_control; - -#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001 -#define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040 -#define VMX_EPT_MEMORY_TYPE_UC 0x00000100 -#define 
VMX_EPT_MEMORY_TYPE_WB 0x00004000 -#define VMX_EPT_SUPERPAGE_2MB 0x00010000 -#define VMX_EPT_SUPERPAGE_1GB 0x00020000 -#define VMX_EPT_INVEPT_INSTRUCTION 0x00100000 -#define VMX_EPT_AD_BIT 0x00200000 -#define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000 -#define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000 -#define VMX_VPID_INVVPID_INSTRUCTION 0x00100000000ULL -#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR 0x10000000000ULL -#define VMX_VPID_INVVPID_SINGLE_CONTEXT 0x20000000000ULL -#define VMX_VPID_INVVPID_ALL_CONTEXT 0x40000000000ULL -#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL -extern u64 vmx_ept_vpid_cap; - -#define VMX_MISC_PROC_TRACE 0x00004000 -#define VMX_MISC_CR3_TARGET 0x01ff0000 -#define VMX_MISC_VMWRITE_ALL 0x20000000 - -#define VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL - -#define cpu_has_wbinvd_exiting \ - (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING) -#define cpu_has_vmx_virtualize_apic_accesses \ - (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) -#define cpu_has_vmx_tpr_shadow \ - (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) -#define cpu_has_vmx_vnmi \ - (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS) -#define cpu_has_vmx_msr_bitmap \ - (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP) -#define cpu_has_vmx_secondary_exec_control \ - (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) -#define cpu_has_vmx_ept \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) -#define cpu_has_vmx_dt_exiting \ - (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING) -#define cpu_has_vmx_vpid \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) -#define cpu_has_monitor_trap_flag \ - (vmx_cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG) -#define cpu_has_vmx_pat \ - (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT) -#define cpu_has_vmx_efer \ - (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_EFER) -#define cpu_has_vmx_unrestricted_guest \ - (vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST) -#define vmx_unrestricted_guest(v) \ - ((v)->arch.hvm.vmx.secondary_exec_control & \ - SECONDARY_EXEC_UNRESTRICTED_GUEST) -#define cpu_has_vmx_ple \ - (vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) -#define cpu_has_vmx_apic_reg_virt \ - (vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT) -#define cpu_has_vmx_virtual_intr_delivery \ - (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) -#define cpu_has_vmx_virtualize_x2apic_mode \ - (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) -#define cpu_has_vmx_posted_intr_processing \ - (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) -#define cpu_has_vmx_vmcs_shadowing \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING) -#define cpu_has_vmx_vmfunc \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) -#define cpu_has_vmx_virt_exceptions \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS) -#define cpu_has_vmx_pml \ - (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML) -#define cpu_has_vmx_mpx \ - ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \ - (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS)) -#define cpu_has_vmx_xsaves \ - (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES) -#define cpu_has_vmx_tsc_scaling \ - (vmx_secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) - -#define VMCS_RID_TYPE_MASK 0x80000000 - -/* GUEST_INTERRUPTIBILITY_INFO flags. 
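 *
 * [Editorial aside, not part of the original patch.]  These flags
 * report why the guest is transiently uninterruptible, e.g. the
 * one-instruction windows following STI or MOV SS.  A sketch of the
 * usual check before injecting an external interrupt, using the
 * two-argument __vmread() wrapper and the GUEST_INTERRUPTIBILITY_INFO
 * encoding from the vmcs_field enum later in this header:
 *
 *     unsigned long intr_shadow;
 *
 *     __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
 *     if ( intr_shadow & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
 *         ...;   injection must wait for the shadow to clear
 *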
*/ -#define VMX_INTR_SHADOW_STI 0x00000001 -#define VMX_INTR_SHADOW_MOV_SS 0x00000002 -#define VMX_INTR_SHADOW_SMI 0x00000004 -#define VMX_INTR_SHADOW_NMI 0x00000008 - -#define VMX_BASIC_REVISION_MASK 0x7fffffff -#define VMX_BASIC_VMCS_SIZE_MASK (0x1fffULL << 32) -#define VMX_BASIC_32BIT_ADDRESSES (1ULL << 48) -#define VMX_BASIC_DUAL_MONITOR (1ULL << 49) -#define VMX_BASIC_MEMORY_TYPE_MASK (0xfULL << 50) -#define VMX_BASIC_INS_OUT_INFO (1ULL << 54) -/* - * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that - * default to 1 may be cleared to 0. - */ -#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55) - -extern u64 vmx_basic_msr; -#define cpu_has_vmx_ins_outs_instr_info \ - (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO)) - -/* Guest interrupt status */ -#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF -#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8 - -/* VMFUNC leaf definitions */ -#define VMX_VMFUNC_EPTP_SWITCHING (1ULL << 0) - -/* VMCS field encodings. */ -#define VMCS_HIGH(x) ((x) | 1) -enum vmcs_field { - VIRTUAL_PROCESSOR_ID = 0x00000000, - POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002, - EPTP_INDEX = 0x00000004, -#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */ - GUEST_ES_SELECTOR = 0x00000800, - GUEST_CS_SELECTOR = 0x00000802, - GUEST_SS_SELECTOR = 0x00000804, - GUEST_DS_SELECTOR = 0x00000806, - GUEST_FS_SELECTOR = 0x00000808, - GUEST_GS_SELECTOR = 0x0000080a, - GUEST_LDTR_SELECTOR = 0x0000080c, - GUEST_TR_SELECTOR = 0x0000080e, - GUEST_INTR_STATUS = 0x00000810, - GUEST_PML_INDEX = 0x00000812, - HOST_ES_SELECTOR = 0x00000c00, - HOST_CS_SELECTOR = 0x00000c02, - HOST_SS_SELECTOR = 0x00000c04, - HOST_DS_SELECTOR = 0x00000c06, - HOST_FS_SELECTOR = 0x00000c08, - HOST_GS_SELECTOR = 0x00000c0a, - HOST_TR_SELECTOR = 0x00000c0c, - IO_BITMAP_A = 0x00002000, - IO_BITMAP_B = 0x00002002, - MSR_BITMAP = 0x00002004, - VM_EXIT_MSR_STORE_ADDR = 0x00002006, - VM_EXIT_MSR_LOAD_ADDR = 0x00002008, - VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, - PML_ADDRESS = 0x0000200e, - TSC_OFFSET = 0x00002010, - VIRTUAL_APIC_PAGE_ADDR = 0x00002012, - APIC_ACCESS_ADDR = 0x00002014, - PI_DESC_ADDR = 0x00002016, - VM_FUNCTION_CONTROL = 0x00002018, - EPT_POINTER = 0x0000201a, - EOI_EXIT_BITMAP0 = 0x0000201c, -#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */ - EPTP_LIST_ADDR = 0x00002024, - VMREAD_BITMAP = 0x00002026, - VMWRITE_BITMAP = 0x00002028, - VIRT_EXCEPTION_INFO = 0x0000202a, - XSS_EXIT_BITMAP = 0x0000202c, - TSC_MULTIPLIER = 0x00002032, - GUEST_PHYSICAL_ADDRESS = 0x00002400, - VMCS_LINK_POINTER = 0x00002800, - GUEST_IA32_DEBUGCTL = 0x00002802, - GUEST_PAT = 0x00002804, - GUEST_EFER = 0x00002806, - GUEST_PERF_GLOBAL_CTRL = 0x00002808, - GUEST_PDPTE0 = 0x0000280a, -#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */ - GUEST_BNDCFGS = 0x00002812, - HOST_PAT = 0x00002c00, - HOST_EFER = 0x00002c02, - HOST_PERF_GLOBAL_CTRL = 0x00002c04, - PIN_BASED_VM_EXEC_CONTROL = 0x00004000, - CPU_BASED_VM_EXEC_CONTROL = 0x00004002, - EXCEPTION_BITMAP = 0x00004004, - PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, - PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, - CR3_TARGET_COUNT = 0x0000400a, - VM_EXIT_CONTROLS = 0x0000400c, - VM_EXIT_MSR_STORE_COUNT = 0x0000400e, - VM_EXIT_MSR_LOAD_COUNT = 0x00004010, - VM_ENTRY_CONTROLS = 0x00004012, - VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, - VM_ENTRY_INTR_INFO = 0x00004016, - VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, - VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, - TPR_THRESHOLD = 0x0000401c, - SECONDARY_VM_EXEC_CONTROL = 0x0000401e, - PLE_GAP = 
0x00004020, - PLE_WINDOW = 0x00004022, - VM_INSTRUCTION_ERROR = 0x00004400, - VM_EXIT_REASON = 0x00004402, - VM_EXIT_INTR_INFO = 0x00004404, - VM_EXIT_INTR_ERROR_CODE = 0x00004406, - IDT_VECTORING_INFO = 0x00004408, - IDT_VECTORING_ERROR_CODE = 0x0000440a, - VM_EXIT_INSTRUCTION_LEN = 0x0000440c, - VMX_INSTRUCTION_INFO = 0x0000440e, -#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */ - GUEST_ES_LIMIT = 0x00004800, - GUEST_CS_LIMIT = 0x00004802, - GUEST_SS_LIMIT = 0x00004804, - GUEST_DS_LIMIT = 0x00004806, - GUEST_FS_LIMIT = 0x00004808, - GUEST_GS_LIMIT = 0x0000480a, - GUEST_LDTR_LIMIT = 0x0000480c, - GUEST_TR_LIMIT = 0x0000480e, - GUEST_GDTR_LIMIT = 0x00004810, - GUEST_IDTR_LIMIT = 0x00004812, -#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */ - GUEST_ES_AR_BYTES = 0x00004814, - GUEST_CS_AR_BYTES = 0x00004816, - GUEST_SS_AR_BYTES = 0x00004818, - GUEST_DS_AR_BYTES = 0x0000481a, - GUEST_FS_AR_BYTES = 0x0000481c, - GUEST_GS_AR_BYTES = 0x0000481e, - GUEST_LDTR_AR_BYTES = 0x00004820, - GUEST_TR_AR_BYTES = 0x00004822, - GUEST_INTERRUPTIBILITY_INFO = 0x00004824, - GUEST_ACTIVITY_STATE = 0x00004826, - GUEST_SMBASE = 0x00004828, - GUEST_SYSENTER_CS = 0x0000482a, - GUEST_PREEMPTION_TIMER = 0x0000482e, - HOST_SYSENTER_CS = 0x00004c00, - CR0_GUEST_HOST_MASK = 0x00006000, - CR4_GUEST_HOST_MASK = 0x00006002, - CR0_READ_SHADOW = 0x00006004, - CR4_READ_SHADOW = 0x00006006, - CR3_TARGET_VALUE0 = 0x00006008, -#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */ - EXIT_QUALIFICATION = 0x00006400, - GUEST_LINEAR_ADDRESS = 0x0000640a, - GUEST_CR0 = 0x00006800, - GUEST_CR3 = 0x00006802, - GUEST_CR4 = 0x00006804, -#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */ - GUEST_ES_BASE = 0x00006806, - GUEST_CS_BASE = 0x00006808, - GUEST_SS_BASE = 0x0000680a, - GUEST_DS_BASE = 0x0000680c, - GUEST_FS_BASE = 0x0000680e, - GUEST_GS_BASE = 0x00006810, - GUEST_LDTR_BASE = 0x00006812, - GUEST_TR_BASE = 0x00006814, - GUEST_GDTR_BASE = 0x00006816, - GUEST_IDTR_BASE = 0x00006818, - GUEST_DR7 = 0x0000681a, - GUEST_RSP = 0x0000681c, - GUEST_RIP = 0x0000681e, - GUEST_RFLAGS = 0x00006820, - GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, - GUEST_SYSENTER_ESP = 0x00006824, - GUEST_SYSENTER_EIP = 0x00006826, - HOST_CR0 = 0x00006c00, - HOST_CR3 = 0x00006c02, - HOST_CR4 = 0x00006c04, - HOST_FS_BASE = 0x00006c06, - HOST_GS_BASE = 0x00006c08, - HOST_TR_BASE = 0x00006c0a, - HOST_GDTR_BASE = 0x00006c0c, - HOST_IDTR_BASE = 0x00006c0e, - HOST_SYSENTER_ESP = 0x00006c10, - HOST_SYSENTER_EIP = 0x00006c12, - HOST_RSP = 0x00006c14, - HOST_RIP = 0x00006c16, -}; - -#define VMCS_VPID_WIDTH 16 - -/* VM Instruction error numbers */ -enum vmx_insn_errno -{ - VMX_INSN_SUCCEED = 0, - VMX_INSN_VMCLEAR_INVALID_PHYADDR = 2, - VMX_INSN_VMCLEAR_WITH_VMXON_PTR = 3, - VMX_INSN_VMLAUNCH_NONCLEAR_VMCS = 4, - VMX_INSN_VMRESUME_NONLAUNCHED_VMCS = 5, - VMX_INSN_INVALID_CONTROL_STATE = 7, - VMX_INSN_INVALID_HOST_STATE = 8, - VMX_INSN_VMPTRLD_INVALID_PHYADDR = 9, - VMX_INSN_VMPTRLD_WITH_VMXON_PTR = 10, - VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID = 11, - VMX_INSN_UNSUPPORTED_VMCS_COMPONENT = 12, - VMX_INSN_VMXON_IN_VMX_ROOT = 15, - VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS = 26, - VMX_INSN_INVEPT_INVVPID_INVALID_OP = 28, - VMX_INSN_FAIL_INVALID = ~0, -}; - -/* MSR load/save list infrastructure. */ -enum vmx_msr_list_type { - VMX_MSR_HOST, /* MSRs loaded on VMExit. */ - VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. 
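 *
 * [Editorial aside, not part of the original patch.]  A hedged usage
 * sketch of the helpers declared just below, keeping a guest MSR
 * context-switched automatically by hardware:
 *
 *     int rc = vmx_add_guest_msr(v, msr, initial_val);
 *
 *     if ( rc )
 *         return rc;                     allocation failed or list full
 *
 *     rc = vmx_read_guest_msr(v, msr, &val);     -ESRCH if never added
 *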
*/ - VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only. */ -}; - -/** - * Add an MSR to an MSR list (inserting space for the entry if necessary), and - * set the MSRs value. - * - * It is undefined behaviour to try and insert the same MSR into both the - * GUEST and GUEST_LOADONLY list. - * - * May fail if unable to allocate memory for the list, or the total number of - * entries exceeds the memory allocated. - */ -int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val, - enum vmx_msr_list_type type); - -/** - * Remove an MSR entry from an MSR list. Returns -ESRCH if the MSR was not - * found in the list. - */ -int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type); - -static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val) -{ - return vmx_add_msr(v, msr, val, VMX_MSR_GUEST); -} -static inline int vmx_add_host_load_msr(struct vcpu *v, uint32_t msr, - uint64_t val) -{ - return vmx_add_msr(v, msr, val, VMX_MSR_HOST); -} - -struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr, - enum vmx_msr_list_type type); - -static inline int vmx_read_guest_msr(const struct vcpu *v, uint32_t msr, - uint64_t *val) -{ - const struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST); - - if ( !ent ) - { - *val = 0; - return -ESRCH; - } - - *val = ent->data; - - return 0; -} - -static inline int vmx_read_guest_loadonly_msr( - const struct vcpu *v, uint32_t msr, uint64_t *val) -{ - const struct vmx_msr_entry *ent = - vmx_find_msr(v, msr, VMX_MSR_GUEST_LOADONLY); - - if ( !ent ) - { - *val = 0; - return -ESRCH; - } - - *val = ent->data; - - return 0; -} - -static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr, - uint64_t val) -{ - struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST); - - if ( !ent ) - return -ESRCH; - - ent->data = val; - - return 0; -} - - -/* MSR intercept bitmap infrastructure. 
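 *
 * [Editorial aside, not part of the original patch.]  The intercept
 * state lives in the vCPU's page-sized struct vmx_msr_bitmap (defined
 * earlier in this header), with separate read and write bits for each
 * of the two architectural MSR ranges.  A sketch of granting a guest
 * direct access to an MSR, so that neither reads nor writes VMEXIT:
 *
 *     vmx_clear_msr_intercept(v, msr, VMX_MSR_RW);
 *
 * and vmx_set_msr_intercept(v, msr, VMX_MSR_W) would re-intercept
 * writes only.
 *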
*/ -enum vmx_msr_intercept_type { - VMX_MSR_R = 1, - VMX_MSR_W = 2, - VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W, -}; - -void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, - enum vmx_msr_intercept_type type); -void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, - enum vmx_msr_intercept_type type); -void vmx_vmcs_switch(paddr_t from, paddr_t to); -void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector); -void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector); -bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap, - unsigned int msr, bool is_write) __nonnull(1); -void virtual_vmcs_enter(const struct vcpu *); -void virtual_vmcs_exit(const struct vcpu *); -u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding); -enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v, - u32 vmcs_encoding, u64 *val); -void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val); -enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v, - u32 vmcs_encoding, u64 val); - -DECLARE_PER_CPU(bool_t, vmxon); - -bool_t vmx_vcpu_pml_enabled(const struct vcpu *v); -int vmx_vcpu_enable_pml(struct vcpu *v); -void vmx_vcpu_disable_pml(struct vcpu *v); -void vmx_vcpu_flush_pml_buffer(struct vcpu *v); -bool_t vmx_domain_pml_enabled(const struct domain *d); -int vmx_domain_enable_pml(struct domain *d); -void vmx_domain_disable_pml(struct domain *d); -void vmx_domain_flush_pml_buffers(struct domain *d); - -void vmx_domain_update_eptp(struct domain *d); - -#endif /* ASM_X86_HVM_VMX_VMCS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h deleted file mode 100644 index 85530d2e0e..0000000000 --- a/xen/include/asm-x86/hvm/vmx/vmx.h +++ /dev/null @@ -1,692 +0,0 @@ -/* - * vmx.h: VMX Architecture related definitions - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ -#ifndef __ASM_X86_HVM_VMX_VMX_H__ -#define __ASM_X86_HVM_VMX_VMX_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -extern int8_t opt_ept_exec_sp; - -typedef union { - struct { - u64 r : 1, /* bit 0 - Read permission */ - w : 1, /* bit 1 - Write permission */ - x : 1, /* bit 2 - Execute permission */ - emt : 3, /* bits 5:3 - EPT Memory type */ - ipat : 1, /* bit 6 - Ignore PAT memory type */ - sp : 1, /* bit 7 - Is this a superpage? 
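 *
 * [Editorial aside, not part of the original patch.]  When sp is set in
 * an intermediate-level entry, the entry maps a whole superpage (a
 * factor of 512 per skipped level, cf. EPT_TABLE_ORDER = 9 below) and
 * mfn names the first frame of that contiguous range rather than a
 * next-level table.  Either way, for an ept_entry_t "entry" the
 * physical address is recovered as:
 *
 *     paddr_t pa = (paddr_t)entry.mfn << PAGE_SHIFT;
 *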
*/ - a : 1, /* bit 8 - Access bit */ - d : 1, /* bit 9 - Dirty bit */ - recalc : 1, /* bit 10 - Software available 1 */ - snp : 1, /* bit 11 - VT-d snoop control in shared - EPT/VT-d usage */ - mfn : 40, /* bits 51:12 - Machine physical frame number */ - sa_p2mt : 6, /* bits 57:52 - Software available 2 */ - access : 4, /* bits 61:58 - p2m_access_t */ - _rsvd : 1, /* bit 62 - reserved */ - suppress_ve : 1; /* bit 63 - suppress #VE */ - }; - u64 epte; -} ept_entry_t; - -typedef struct { - /*use lxe[0] to save result */ - ept_entry_t lxe[5]; -} ept_walk_t; - -typedef enum { - ept_access_n = 0, /* No access permissions allowed */ - ept_access_r = 1, /* Read only */ - ept_access_w = 2, /* Write only */ - ept_access_rw = 3, /* Read & Write */ - ept_access_x = 4, /* Exec Only */ - ept_access_rx = 5, /* Read & Exec */ - ept_access_wx = 6, /* Write & Exec*/ - ept_access_all = 7, /* Full permissions */ -} ept_access_t; - -#define EPT_TABLE_ORDER 9 -#define EPTE_SUPER_PAGE_MASK 0x80 -#define EPTE_MFN_MASK 0xffffffffff000ULL -#define EPTE_AVAIL1_MASK 0xF00 -#define EPTE_EMT_MASK 0x38 -#define EPTE_IGMT_MASK 0x40 -#define EPTE_AVAIL1_SHIFT 8 -#define EPTE_EMT_SHIFT 3 -#define EPTE_IGMT_SHIFT 6 -#define EPTE_RWX_MASK 0x7 -#define EPTE_FLAG_MASK 0x7f - -#define EPT_EMT_UC 0 -#define EPT_EMT_WC 1 -#define EPT_EMT_RSV0 2 -#define EPT_EMT_RSV1 3 -#define EPT_EMT_WT 4 -#define EPT_EMT_WP 5 -#define EPT_EMT_WB 6 -#define EPT_EMT_RSV2 7 - -#define PI_xAPIC_NDST_MASK 0xFF00 - -void vmx_asm_vmexit_handler(struct cpu_user_regs); -void vmx_intr_assist(void); -void noreturn vmx_do_resume(void); -void vmx_vlapic_msr_changed(struct vcpu *v); -struct hvm_emulate_ctxt; -void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt); -void vmx_realmode(struct cpu_user_regs *regs); -void vmx_update_debug_state(struct vcpu *v); -void vmx_update_exception_bitmap(struct vcpu *v); -void vmx_update_cpu_exec_control(struct vcpu *v); -void vmx_update_secondary_exec_control(struct vcpu *v); - -#define POSTED_INTR_ON 0 -#define POSTED_INTR_SN 1 -static inline int pi_test_and_set_pir(uint8_t vector, struct pi_desc *pi_desc) -{ - return test_and_set_bit(vector, pi_desc->pir); -} - -static inline int pi_test_pir(uint8_t vector, const struct pi_desc *pi_desc) -{ - return test_bit(vector, pi_desc->pir); -} - -static inline int pi_test_and_set_on(struct pi_desc *pi_desc) -{ - return test_and_set_bit(POSTED_INTR_ON, &pi_desc->control); -} - -static inline void pi_set_on(struct pi_desc *pi_desc) -{ - set_bit(POSTED_INTR_ON, &pi_desc->control); -} - -static inline int pi_test_and_clear_on(struct pi_desc *pi_desc) -{ - return test_and_clear_bit(POSTED_INTR_ON, &pi_desc->control); -} - -static inline int pi_test_on(struct pi_desc *pi_desc) -{ - return pi_desc->on; -} - -static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group) -{ - return xchg(&pi_desc->pir[group], 0); -} - -static inline int pi_test_sn(struct pi_desc *pi_desc) -{ - return pi_desc->sn; -} - -static inline void pi_set_sn(struct pi_desc *pi_desc) -{ - set_bit(POSTED_INTR_SN, &pi_desc->control); -} - -static inline void pi_clear_sn(struct pi_desc *pi_desc) -{ - clear_bit(POSTED_INTR_SN, &pi_desc->control); -} - -/* - * Exit Reasons - */ -#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 - -#define EXIT_REASON_EXCEPTION_NMI 0 -#define EXIT_REASON_EXTERNAL_INTERRUPT 1 -#define EXIT_REASON_TRIPLE_FAULT 2 -#define EXIT_REASON_INIT 3 -#define EXIT_REASON_SIPI 4 -#define EXIT_REASON_IO_SMI 5 -#define EXIT_REASON_OTHER_SMI 6 -#define 
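For reviewers unfamiliar with the EPTE_* accessors being moved here, a freestanding C sketch (compiles on its own; the sample value is invented) showing how the mask/shift pairs decompose a raw EPT entry:

    #include <stdint.h>
    #include <stdio.h>

    #define EPTE_MFN_MASK  0xffffffffff000ULL /* bits 51:12 */
    #define EPTE_EMT_MASK  0x38               /* bits 5:3 */
    #define EPTE_EMT_SHIFT 3
    #define EPTE_RWX_MASK  0x7                /* bits 2:0 */

    int main(void)
    {
        uint64_t epte = 0x1234567b3ULL; /* example value only */

        /* Prints: mfn 0x123456 emt 6 rwx 3 (i.e. WB, read+write). */
        printf("mfn %#llx emt %llu rwx %llu\n",
               (unsigned long long)((epte & EPTE_MFN_MASK) >> 12),
               (unsigned long long)((epte & EPTE_EMT_MASK) >> EPTE_EMT_SHIFT),
               (unsigned long long)(epte & EPTE_RWX_MASK));
        return 0;
    }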
EXIT_REASON_PENDING_VIRT_INTR 7 -#define EXIT_REASON_PENDING_VIRT_NMI 8 -#define EXIT_REASON_TASK_SWITCH 9 -#define EXIT_REASON_CPUID 10 -#define EXIT_REASON_GETSEC 11 -#define EXIT_REASON_HLT 12 -#define EXIT_REASON_INVD 13 -#define EXIT_REASON_INVLPG 14 -#define EXIT_REASON_RDPMC 15 -#define EXIT_REASON_RDTSC 16 -#define EXIT_REASON_RSM 17 -#define EXIT_REASON_VMCALL 18 -#define EXIT_REASON_VMCLEAR 19 -#define EXIT_REASON_VMLAUNCH 20 -#define EXIT_REASON_VMPTRLD 21 -#define EXIT_REASON_VMPTRST 22 -#define EXIT_REASON_VMREAD 23 -#define EXIT_REASON_VMRESUME 24 -#define EXIT_REASON_VMWRITE 25 -#define EXIT_REASON_VMXOFF 26 -#define EXIT_REASON_VMXON 27 -#define EXIT_REASON_CR_ACCESS 28 -#define EXIT_REASON_DR_ACCESS 29 -#define EXIT_REASON_IO_INSTRUCTION 30 -#define EXIT_REASON_MSR_READ 31 -#define EXIT_REASON_MSR_WRITE 32 -#define EXIT_REASON_INVALID_GUEST_STATE 33 -#define EXIT_REASON_MSR_LOADING 34 -#define EXIT_REASON_MWAIT_INSTRUCTION 36 -#define EXIT_REASON_MONITOR_TRAP_FLAG 37 -#define EXIT_REASON_MONITOR_INSTRUCTION 39 -#define EXIT_REASON_PAUSE_INSTRUCTION 40 -#define EXIT_REASON_MCE_DURING_VMENTRY 41 -#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 -#define EXIT_REASON_APIC_ACCESS 44 -#define EXIT_REASON_EOI_INDUCED 45 -#define EXIT_REASON_ACCESS_GDTR_OR_IDTR 46 -#define EXIT_REASON_ACCESS_LDTR_OR_TR 47 -#define EXIT_REASON_EPT_VIOLATION 48 -#define EXIT_REASON_EPT_MISCONFIG 49 -#define EXIT_REASON_INVEPT 50 -#define EXIT_REASON_RDTSCP 51 -#define EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 52 -#define EXIT_REASON_INVVPID 53 -#define EXIT_REASON_WBINVD 54 -#define EXIT_REASON_XSETBV 55 -#define EXIT_REASON_APIC_WRITE 56 -#define EXIT_REASON_INVPCID 58 -#define EXIT_REASON_VMFUNC 59 -#define EXIT_REASON_PML_FULL 62 -#define EXIT_REASON_XSAVES 63 -#define EXIT_REASON_XRSTORS 64 - -/* - * Interruption-information format - */ -#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ -#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ -#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ -#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000 /* 12 */ -#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ -#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 - -/* - * Exit Qualifications for MOV for Control Register Access - */ -enum { - VMX_CR_ACCESS_TYPE_MOV_TO_CR, - VMX_CR_ACCESS_TYPE_MOV_FROM_CR, - VMX_CR_ACCESS_TYPE_CLTS, - VMX_CR_ACCESS_TYPE_LMSW, -}; -typedef union cr_access_qual { - unsigned long raw; - struct { - uint16_t cr:4, - access_type:2, /* VMX_CR_ACCESS_TYPE_* */ - lmsw_op_type:1, /* 0 => reg, 1 => mem */ - :1, - gpr:4, - :4; - uint16_t lmsw_data; - uint32_t :32; - }; -} __transparent__ cr_access_qual_t; - -/* - * Access Rights - */ -#define X86_SEG_AR_SEG_TYPE 0xf /* 3:0, segment type */ -#define X86_SEG_AR_DESC_TYPE (1u << 4) /* 4, descriptor type */ -#define X86_SEG_AR_DPL 0x60 /* 6:5, descriptor privilege level */ -#define X86_SEG_AR_SEG_PRESENT (1u << 7) /* 7, segment present */ -#define X86_SEG_AR_AVL (1u << 12) /* 12, available for system software */ -#define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */ -#define X86_SEG_AR_DEF_OP_SIZE (1u << 14) /* 14, default operation size */ -#define X86_SEG_AR_GRANULARITY (1u << 15) /* 15, granularity */ -#define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */ - -#define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n" -#define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */ -#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n" -#define VMPTRLD_OPCODE ".byte 0x0f,0xc7\n" /* reg/opcode: /6 */ -#define VMPTRST_OPCODE ".byte 
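The cr_access_qual_t union above decodes the CR-access exit qualification; a freestanding restatement (GCC-style bitfields on x86, matching the header's own assumptions; the sample value is invented):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        unsigned long raw;
        struct {
            uint16_t cr:4,
                     access_type:2,  /* VMX_CR_ACCESS_TYPE_* */
                     lmsw_op_type:1, /* 0 => reg, 1 => mem */
                     :1,
                     gpr:4,
                     :4;
            uint16_t lmsw_data;
            uint32_t :32;
        };
    } cr_access_qual_t;

    int main(void)
    {
        cr_access_qual_t q = { .raw = 0x314 }; /* MOV from CR4 into GPR 3 */

        printf("cr%u type %u gpr %u\n",
               (unsigned)q.cr, (unsigned)q.access_type, (unsigned)q.gpr);
        return 0;
    }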
0x0f,0xc7\n" /* reg/opcode: /7 */ -#define VMREAD_OPCODE ".byte 0x0f,0x78\n" -#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n" -#define VMWRITE_OPCODE ".byte 0x0f,0x79\n" -#define INVEPT_OPCODE ".byte 0x66,0x0f,0x38,0x80\n" /* m128,r64/32 */ -#define INVVPID_OPCODE ".byte 0x66,0x0f,0x38,0x81\n" /* m128,r64/32 */ -#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4\n" -#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7\n" - -#define MODRM_EAX_08 ".byte 0x08\n" /* ECX, [EAX] */ -#define MODRM_EAX_06 ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */ -#define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */ -#define MODRM_EAX_ECX ".byte 0xc1\n" /* EAX, ECX */ - -extern uint8_t posted_intr_vector; - -#define cpu_has_vmx_ept_exec_only_supported \ - (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED) - -#define cpu_has_vmx_ept_wl4_supported \ - (vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) -#define cpu_has_vmx_ept_mt_uc (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_UC) -#define cpu_has_vmx_ept_mt_wb (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) -#define cpu_has_vmx_ept_2mb (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB) -#define cpu_has_vmx_ept_1gb (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB) -#define cpu_has_vmx_ept_ad (vmx_ept_vpid_cap & VMX_EPT_AD_BIT) -#define cpu_has_vmx_ept_invept_single_context \ - (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT) - -#define EPT_2MB_SHIFT 16 -#define EPT_1GB_SHIFT 17 -#define ept_has_2mb(c) ((c >> EPT_2MB_SHIFT) & 1) -#define ept_has_1gb(c) ((c >> EPT_1GB_SHIFT) & 1) - -#define INVEPT_SINGLE_CONTEXT 1 -#define INVEPT_ALL_CONTEXT 2 - -#define cpu_has_vmx_vpid_invvpid_individual_addr \ - (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR) -#define cpu_has_vmx_vpid_invvpid_single_context \ - (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT) -#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \ - (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL) - -#define INVVPID_INDIVIDUAL_ADDR 0 -#define INVVPID_SINGLE_CONTEXT 1 -#define INVVPID_ALL_CONTEXT 2 -#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3 - -#ifdef HAVE_AS_VMX -# define GAS_VMX_OP(yes, no) yes -#else -# define GAS_VMX_OP(yes, no) no -#endif - -static always_inline void __vmptrld(u64 addr) -{ - asm volatile ( -#ifdef HAVE_AS_VMX - "vmptrld %0\n" -#else - VMPTRLD_OPCODE MODRM_EAX_06 -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, vmptrld) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION - : -#ifdef HAVE_AS_VMX - : "m" (addr), -#else - : "a" (&addr), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - : "memory"); -} - -static always_inline void __vmpclear(u64 addr) -{ - asm volatile ( -#ifdef HAVE_AS_VMX - "vmclear %0\n" -#else - VMCLEAR_OPCODE MODRM_EAX_06 -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, vmclear) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION - : -#ifdef HAVE_AS_VMX - : "m" (addr), -#else - : "a" (&addr), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - : "memory"); -} - -static always_inline void __vmread(unsigned long field, unsigned long *value) -{ - asm volatile ( -#ifdef HAVE_AS_VMX - "vmread %1, %0\n\t" -#else - VMREAD_OPCODE MODRM_EAX_ECX -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, vmread) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION -#ifdef HAVE_AS_VMX - : "=rm" (*value) - : "r" (field), -#else - : "=c" (*value) - : "a" (field), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - ); -} - -static always_inline void __vmwrite(unsigned long field, unsigned long 
value) -{ - asm volatile ( -#ifdef HAVE_AS_VMX - "vmwrite %1, %0\n" -#else - VMWRITE_OPCODE MODRM_EAX_ECX -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, vmwrite) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION - : -#ifdef HAVE_AS_VMX - : "r" (field) , "rm" (value), -#else - : "a" (field) , "c" (value), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - ); -} - -static inline enum vmx_insn_errno vmread_safe(unsigned long field, - unsigned long *value) -{ - unsigned long ret = VMX_INSN_SUCCEED; - bool fail_invalid, fail_valid; - - asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t", - VMREAD_OPCODE MODRM_EAX_ECX) - ASM_FLAG_OUT(, "setc %[invalid]\n\t") - ASM_FLAG_OUT(, "setz %[valid]\n\t") - : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid), - ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid), - [value] GAS_VMX_OP("=rm", "=c") (*value) - : [field] GAS_VMX_OP("r", "a") (field)); - - if ( unlikely(fail_invalid) ) - ret = VMX_INSN_FAIL_INVALID; - else if ( unlikely(fail_valid) ) - __vmread(VM_INSTRUCTION_ERROR, &ret); - - return ret; -} - -static inline enum vmx_insn_errno vmwrite_safe(unsigned long field, - unsigned long value) -{ - unsigned long ret = VMX_INSN_SUCCEED; - bool fail_invalid, fail_valid; - - asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t", - VMWRITE_OPCODE MODRM_EAX_ECX) - ASM_FLAG_OUT(, "setc %[invalid]\n\t") - ASM_FLAG_OUT(, "setz %[valid]\n\t") - : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid), - ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid) - : [field] GAS_VMX_OP("r", "a") (field), - [value] GAS_VMX_OP("rm", "c") (value)); - - if ( unlikely(fail_invalid) ) - ret = VMX_INSN_FAIL_INVALID; - else if ( unlikely(fail_valid) ) - __vmread(VM_INSTRUCTION_ERROR, &ret); - - return ret; -} - -static always_inline void __invept(unsigned long type, uint64_t eptp) -{ - struct { - uint64_t eptp, rsvd; - } operand = { eptp }; - - /* - * If single context invalidation is not supported, we escalate to - * use all context invalidation. - */ - if ( (type == INVEPT_SINGLE_CONTEXT) && - !cpu_has_vmx_ept_invept_single_context ) - type = INVEPT_ALL_CONTEXT; - - asm volatile ( -#ifdef HAVE_AS_EPT - "invept %0, %1\n" -#else - INVEPT_OPCODE MODRM_EAX_08 -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, invept) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION - : -#ifdef HAVE_AS_EPT - : "m" (operand), "r" (type), -#else - : "a" (&operand), "c" (type), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - : "memory" ); -} - -static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva) -{ - struct __packed { - u64 vpid:16; - u64 rsvd:48; - u64 gva; - } operand = {vpid, 0, gva}; - - /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. 
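A sketch of the intended division of labour between __vmread() (which BUG()s on failure) and vmread_safe() above; the wrapper and default value are illustrative, not from this patch:

    /* Illustrative: read a VMCS field without crashing on VMfail. */
    static unsigned long read_field_or(unsigned long field, unsigned long def)
    {
        unsigned long val;
        enum vmx_insn_errno rc = vmread_safe(field, &val);

        /* rc is 0, VMX_INSN_FAIL_INVALID, or a VM_INSTRUCTION_ERROR value. */
        return rc == VMX_INSN_SUCCEED ? val : def;
    }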
*/ - asm volatile ( "1: " -#ifdef HAVE_AS_EPT - "invvpid %0, %1\n" -#else - INVVPID_OPCODE MODRM_EAX_08 -#endif - /* CF==1 or ZF==1 --> BUG() */ - UNLIKELY_START(be, invvpid) - _ASM_BUGFRAME_TEXT(0) - UNLIKELY_END_SECTION "\n" - "2:" - _ASM_EXTABLE(1b, 2b) - : -#ifdef HAVE_AS_EPT - : "m" (operand), "r" (type), -#else - : "a" (&operand), "c" (type), -#endif - _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) - : "memory" ); -} - -static inline void ept_sync_all(void) -{ - __invept(INVEPT_ALL_CONTEXT, 0); -} - -void ept_sync_domain(struct p2m_domain *p2m); - -static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva) -{ - int type = INVVPID_INDIVIDUAL_ADDR; - - /* - * If individual address invalidation is not supported, we escalate to - * use single context invalidation. - */ - if ( likely(cpu_has_vmx_vpid_invvpid_individual_addr) ) - goto execute_invvpid; - - type = INVVPID_SINGLE_CONTEXT; - - /* - * If single context invalidation is not supported, we escalate to - * use all context invalidation. - */ - if ( !cpu_has_vmx_vpid_invvpid_single_context ) - type = INVVPID_ALL_CONTEXT; - -execute_invvpid: - __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva); -} - -static inline void vpid_sync_all(void) -{ - __invvpid(INVVPID_ALL_CONTEXT, 0, 0); -} - -static inline void __vmxoff(void) -{ - asm volatile ( - VMXOFF_OPCODE - : : : "memory" ); -} - -static inline int __vmxon(u64 addr) -{ - int rc; - - asm volatile ( - "1: " VMXON_OPCODE MODRM_EAX_06 "\n" - " setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */ - "2:\n" - ".section .fixup,\"ax\"\n" - "3: sub $2,%0 ; jmp 2b\n" /* #UD or #GP --> rc = -2 */ - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : "=q" (rc) - : "0" (0), "a" (&addr) - : "memory"); - - return rc; -} - -int vmx_guest_x86_mode(struct vcpu *v); -unsigned int vmx_get_cpl(void); - -void vmx_inject_extint(int trap, uint8_t source); -void vmx_inject_nmi(void); - -int ept_p2m_init(struct p2m_domain *p2m); -void ept_p2m_uninit(struct p2m_domain *p2m); - -void ept_walk_table(struct domain *d, unsigned long gfn); -bool_t ept_handle_misconfig(uint64_t gpa); -int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn, - unsigned int order, bool *ipat, p2m_type_t type); -void setup_ept_dump(void); -void p2m_init_altp2m_ept(struct domain *d, unsigned int i); -/* Locate an alternate p2m by its EPTP */ -unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp); - -void update_guest_eip(void); - -void vmx_pi_per_cpu_init(unsigned int cpu); -void vmx_pi_desc_fixup(unsigned int cpu); - -void vmx_sync_exit_bitmap(struct vcpu *v); - -#ifdef CONFIG_HVM -void vmx_pi_hooks_assign(struct domain *d); -void vmx_pi_hooks_deassign(struct domain *d); -#else -static inline void vmx_pi_hooks_assign(struct domain *d) {} -static inline void vmx_pi_hooks_deassign(struct domain *d) {} -#endif - -#define APIC_INVALID_DEST 0xffffffff - -/* EPT violation qualifications definitions */ -typedef union ept_qual { - unsigned long raw; - struct { - bool read:1, write:1, fetch:1, - eff_read:1, eff_write:1, eff_exec:1, /* eff_user_exec */:1, - gla_valid:1, - gla_fault:1; /* Valid iff gla_valid. 
*/ - unsigned long /* pad */:55; - }; -} __transparent__ ept_qual_t; - -#define EPT_L4_PAGETABLE_SHIFT 39 -#define EPT_PAGETABLE_ENTRIES 512 - -/* #VE information page */ -typedef struct { - u32 exit_reason; - u32 semaphore; - u64 exit_qualification; - u64 gla; - u64 gpa; - u16 eptp_index; -} ve_info_t; - -/* VM-Exit instruction info for LIDT, LGDT, SIDT, SGDT */ -typedef union idt_or_gdt_instr_info { - unsigned long raw; - struct { - unsigned long scaling :2, /* bits 0:1 - Scaling */ - :5, /* bits 6:2 - Undefined */ - addr_size :3, /* bits 9:7 - Address size */ - :1, /* bit 10 - Cleared to 0 */ - operand_size :1, /* bit 11 - Operand size */ - :3, /* bits 14:12 - Undefined */ - segment_reg :3, /* bits 17:15 - Segment register */ - index_reg :4, /* bits 21:18 - Index register */ - index_reg_invalid :1, /* bit 22 - Index register invalid */ - base_reg :4, /* bits 26:23 - Base register */ - base_reg_invalid :1, /* bit 27 - Base register invalid */ - instr_identity :1, /* bit 28 - 0:GDT, 1:IDT */ - instr_write :1, /* bit 29 - 0:store, 1:load */ - :34; /* bits 30:63 - Undefined */ - }; -} idt_or_gdt_instr_info_t; - -/* VM-Exit instruction info for LLDT, LTR, SLDT, STR */ -typedef union ldt_or_tr_instr_info { - unsigned long raw; - struct { - unsigned long scaling :2, /* bits 0:1 - Scaling */ - :1, /* bit 2 - Undefined */ - reg1 :4, /* bits 6:3 - Reg1 */ - addr_size :3, /* bits 9:7 - Address size */ - mem_reg :1, /* bit 10 - Mem/Reg */ - :4, /* bits 14:11 - Undefined */ - segment_reg :3, /* bits 17:15 - Segment register */ - index_reg :4, /* bits 21:18 - Index register */ - index_reg_invalid :1, /* bit 22 - Index register invalid */ - base_reg :4, /* bits 26:23 - Base register */ - base_reg_invalid :1, /* bit 27 - Base register invalid */ - instr_identity :1, /* bit 28 - 0:LDT, 1:TR */ - instr_write :1, /* bit 29 - 0:store, 1:load */ - :34; /* bits 31:63 - Undefined */ - }; -} ldt_or_tr_instr_info_t; - -#endif /* __ASM_X86_HVM_VMX_VMX_H__ */ diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h deleted file mode 100644 index d5f68f30b1..0000000000 --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ /dev/null @@ -1,214 +0,0 @@ - -/* - * vvmx.h: Support virtual VMX for nested virtualization. - * - * Copyright (c) 2010, Intel Corporation. - * Author: Qing He - * Eddie Dong - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - */ -#ifndef __ASM_X86_HVM_VVMX_H__ -#define __ASM_X86_HVM_VVMX_H__ - -struct vvmcs_list { - unsigned long vvmcs_mfn; - struct list_head node; -}; - -struct nestedvmx { - /* - * vmxon_region_pa is also used to indicate whether a vcpu is in - * the VMX operation. When a vcpu is out of the VMX operation, its - * vmxon_region_pa is set to an invalid address INVALID_PADDR. We - * cannot use 0 for this purpose, because it's a valid VMXON region - * address. 
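To illustrate the idt_or_gdt_instr_info_t layout above, a freestanding decode of a sample VM-exit instruction-information word (LP64 and GCC-style bitfields assumed, as in the header; the value is invented):

    #include <stdio.h>

    typedef union {
        unsigned long raw;
        struct {
            unsigned long scaling:2, :5, addr_size:3, :1, operand_size:1, :3,
                          segment_reg:3, index_reg:4, index_reg_invalid:1,
                          base_reg:4, base_reg_invalid:1,
                          instr_identity:1, /* 0: GDT, 1: IDT */
                          instr_write:1,    /* 0: store, 1: load */
                          :34;
        };
    } instr_info_t;

    int main(void)
    {
        instr_info_t info = { .raw = 0x30000000UL }; /* bits 28 and 29 set */

        /* identity=1, write=1 decodes as a load of the IDT, i.e. LIDT. */
        printf("%s %s\n", info.instr_write ? "load" : "store",
               info.instr_identity ? "IDT" : "GDT");
        return 0;
    }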
- */ - paddr_t vmxon_region_pa; - void *iobitmap[2]; /* map (va) of L1 guest I/O bitmap */ - struct vmx_msr_bitmap *msrbitmap; /* map (va) of L1 guest MSR bitmap */ - struct vmx_msr_bitmap *msr_merged; /* merged L1 and L2 MSR bitmap */ - /* deferred nested interrupt */ - struct { - unsigned long intr_info; - u32 error_code; - u8 source; - } intr; - struct { - bool_t enabled; - uint32_t exit_reason; - uint32_t exit_qual; - } ept; - uint32_t guest_vpid; - struct list_head launched_list; -}; - -#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx) - -/* bit 1, 2, 4 must be 1 */ -#define VMX_PINBASED_CTLS_DEFAULT1 0x16 -/* bit 1, 4-6,8,13-16,26 must be 1 */ -#define VMX_PROCBASED_CTLS_DEFAULT1 0x401e172 -/* bit 0-8, 10,11,13,14,16,17 must be 1 */ -#define VMX_EXIT_CTLS_DEFAULT1 0x36dff -/* bit 0-8, and 12 must be 1 */ -#define VMX_ENTRY_CTLS_DEFAULT1 0x11ff - - -union vmx_inst_info { - struct { - unsigned int scaling :2; /* bit 0-1 */ - unsigned int __rsvd0 :1; /* bit 2 */ - unsigned int reg1 :4; /* bit 3-6 */ - unsigned int addr_size :3; /* bit 7-9 */ - unsigned int memreg :1; /* bit 10 */ - unsigned int __rsvd1 :4; /* bit 11-14 */ - unsigned int segment :3; /* bit 15-17 */ - unsigned int index_reg :4; /* bit 18-21 */ - unsigned int index_reg_invalid :1; /* bit 22 */ - unsigned int base_reg :4; /* bit 23-26 */ - unsigned int base_reg_invalid :1; /* bit 27 */ - unsigned int reg2 :4; /* bit 28-31 */ - } fields; - u32 word; }; - -int nvmx_vcpu_initialise(struct vcpu *v); -void nvmx_vcpu_destroy(struct vcpu *v); -int nvmx_vcpu_reset(struct vcpu *v); -uint64_t nvmx_vcpu_eptp_base(struct vcpu *v); -enum hvm_intblk nvmx_intr_blocked(struct vcpu *v); -bool_t nvmx_intercepts_exception( - struct vcpu *v, unsigned int vector, int error_code); -void nvmx_domain_relinquish_resources(struct domain *d); - -bool_t nvmx_ept_enabled(struct vcpu *v); - -#define EPT_TRANSLATE_SUCCEED 0 -#define EPT_TRANSLATE_VIOLATION 1 -#define EPT_TRANSLATE_MISCONFIG 2 -#define EPT_TRANSLATE_RETRY 3 - -int -nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, - unsigned int *page_order, uint8_t *p2m_acc, - bool_t access_r, bool_t access_w, bool_t access_x); -/* - * Virtual VMCS layout - * - * Since physical VMCS layout is unknown, a custom layout is used - * for virtual VMCS seen by guest. It occupies a 4k page, and the - * field is offset by an 9-bit offset into u64[], The offset is as - * follow, which means every <width, type> pair has a max of 32 - * fields available. - * - * 9 7 5 0 - * -------------------------------- - * offset: | width | type | index | - * -------------------------------- - * - * Also, since the lower range <width=0, type=0> has only one - * field: VPID, it is moved to a higher offset (63), and leaves the - * lower range to non-indexed field like VMCS revision.
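Reading the layout diagram above back as arithmetic, one plausible sketch of the encoding-to-offset translation (an interpretation of the comment, not code taken from the patch):

    #include <stdio.h>

    /* 9-bit offset into the u64[512] page: | width(2) | type(2) | index(5) | */
    static unsigned int vvmcs_offset(unsigned int width, unsigned int type,
                                     unsigned int index)
    {
        unsigned int offset = (width << 7) | (type << 5) | (index & 0x1f);

        /* The <width=0, type=0> range holds only VPID, relocated to 63. */
        return offset ? offset : 63;
    }

    int main(void)
    {
        /* Prints: offset 260, VPID offset 63 */
        printf("offset %u, VPID offset %u\n",
               vvmcs_offset(2, 0, 4), vvmcs_offset(0, 0, 0));
        return 0;
    }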
- * - */ - -struct vvmcs_header { - u32 revision; - u32 abort; -}; - -union vmcs_encoding { - struct { - u32 access_type : 1; - u32 index : 9; - u32 type : 2; - u32 rsv1 : 1; - u32 width : 2; - u32 rsv2 : 17; - }; - u32 word; -}; - -enum vvmcs_encoding_width { - VVMCS_WIDTH_16 = 0, - VVMCS_WIDTH_64, - VVMCS_WIDTH_32, - VVMCS_WIDTH_NATURAL, -}; - -enum vvmcs_encoding_type { - VVMCS_TYPE_CONTROL = 0, - VVMCS_TYPE_RO, - VVMCS_TYPE_GSTATE, - VVMCS_TYPE_HSTATE, -}; - -u64 get_vvmcs_virtual(void *vvmcs, u32 encoding); -u64 get_vvmcs_real(const struct vcpu *, u32 encoding); -void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val); -void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val); -enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val); -enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding, - u64 *val); -enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val); -enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding, - u64 val); - -#define get_vvmcs(vcpu, encoding) \ - (cpu_has_vmx_vmcs_shadowing ? \ - get_vvmcs_real(vcpu, encoding) : \ - get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding)) - -#define set_vvmcs(vcpu, encoding, val) \ - (cpu_has_vmx_vmcs_shadowing ? \ - set_vvmcs_real(vcpu, encoding, val) : \ - set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) - -#define get_vvmcs_safe(vcpu, encoding, val) \ - (cpu_has_vmx_vmcs_shadowing ? \ - get_vvmcs_real_safe(vcpu, encoding, val) : \ - get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) - -#define set_vvmcs_safe(vcpu, encoding, val) \ - (cpu_has_vmx_vmcs_shadowing ? \ - set_vvmcs_real_safe(vcpu, encoding, val) : \ - set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) - -void nvmx_destroy_vmcs(struct vcpu *v); -int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason); -int nvmx_msr_read_intercept(unsigned int msr, - u64 *msr_content); - -void nvmx_update_exec_control(struct vcpu *v, u32 value); -void nvmx_update_secondary_exec_control(struct vcpu *v, - unsigned long value); -void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value); -void nvmx_switch_guest(void); -void nvmx_idtv_handling(void); -u64 nvmx_get_tsc_offset(struct vcpu *v); -int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, - unsigned int exit_reason); -void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr); - -uint64_t nept_get_ept_vpid_cap(void); - -int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, - unsigned int *page_order, uint32_t rwx_acc, - unsigned long *l1gfn, uint8_t *p2m_acc, - uint64_t *exit_qual, uint32_t *exit_reason); -int nvmx_cpu_up_prepare(unsigned int cpu); -void nvmx_cpu_dead(unsigned int cpu); -#endif /* __ASM_X86_HVM_VVMX_H__ */ - diff --git a/xen/include/asm-x86/hvm/vpic.h b/xen/include/asm-x86/hvm/vpic.h deleted file mode 100644 index d71b270193..0000000000 --- a/xen/include/asm-x86/hvm/vpic.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * i8259 interrupt controller emulation - * - * Copyright (c) 2003 Fabrice Bellard - * Copyright (c) 2005 Intel Corp - * Copyright (c) 2006 Keir Fraser, XenSource Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef __ASM_X86_HVM_VPIC_H__ -#define __ASM_X86_HVM_VPIC_H__ - -struct domain; -struct vcpu; - -void vpic_irq_positive_edge(struct domain *d, int irq); -void vpic_irq_negative_edge(struct domain *d, int irq); -void vpic_init(struct domain *d); -void vpic_reset(struct domain *d); -int vpic_ack_pending_irq(struct vcpu *v); -int is_periodic_irq(struct vcpu *v, int irq, int type); - -#endif /* __ASM_X86_HVM_VPIC_H__ */ diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h deleted file mode 100644 index 74c0cedd11..0000000000 --- a/xen/include/asm-x86/hvm/vpt.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - * vpt.h: Virtual Platform Timer definitions - * - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_HVM_VPT_H__ -#define __ASM_X86_HVM_VPT_H__ - -#include -#include -#include -#include - -/* - * Abstract layer of periodic time, one short time. 
- */ -typedef void time_cb(struct vcpu *v, void *opaque); - -struct periodic_time { - struct list_head list; - bool on_list; - bool one_shot; - bool do_not_freeze; - bool irq_issued; - bool warned_timeout_too_short; - bool level; -#define PTSRC_isa 1 /* ISA time source */ -#define PTSRC_lapic 2 /* LAPIC time source */ -#define PTSRC_ioapic 3 /* IOAPIC time source */ - u8 source; /* PTSRC_ */ - u8 irq; - struct vcpu *vcpu; /* vcpu timer interrupt delivers to */ - u32 pending_intr_nr; /* pending timer interrupts */ - u64 period; /* frequency in ns */ - s_time_t scheduled; /* scheduled timer interrupt */ - u64 last_plt_gtime; /* platform time when last IRQ is injected */ - struct timer timer; /* ac_timer */ - time_cb *cb; - void *priv; /* point back to platform time source */ -}; - - -#define PIT_FREQ 1193182 -#define PIT_BASE 0x40 - -typedef struct PITState { - /* Hardware state */ - struct hvm_hw_pit hw; - /* Last time the counters read zero, for calcuating counter reads */ - int64_t count_load_time[3]; - /* Channel 0 IRQ handling. */ - struct periodic_time pt0; - spinlock_t lock; -} PITState; - -struct hpet_registers { - /* Memory-mapped, software visible registers */ - uint64_t capability; /* capabilities */ - uint64_t config; /* configuration */ - uint64_t isr; /* interrupt status reg */ - uint64_t mc64; /* main counter */ - struct { /* timers */ - uint64_t config; /* configuration/cap */ - uint64_t cmp; /* comparator */ - uint64_t fsb; /* FSB route, not supported now */ - } timers[HPET_TIMER_NUM]; - - /* Hidden register state */ - uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ - uint64_t comparator64[HPET_TIMER_NUM]; /* 64 bit running comparator */ -}; - -typedef struct HPETState { - struct hpet_registers hpet; - uint64_t stime_freq; - uint64_t hpet_to_ns_scale; /* hpet ticks to ns (multiplied by 2^10) */ - uint64_t hpet_to_ns_limit; /* max hpet ticks convertable to ns */ - uint64_t mc_offset; - struct periodic_time pt[HPET_TIMER_NUM]; - rwlock_t lock; -} HPETState; - -typedef struct RTCState { - /* Hardware state */ - struct hvm_hw_rtc hw; - /* RTC's idea of the current time */ - struct tm current_tm; - /* update-ended timer */ - struct timer update_timer; - struct timer update_timer2; - uint64_t next_update_time; - /* alarm timer */ - struct timer alarm_timer; - /* periodic timer */ - struct periodic_time pt; - s_time_t start_time; - s_time_t check_ticks_since; - int period; - uint8_t pt_dead_ticks; - uint32_t use_timer; - spinlock_t lock; -} RTCState; - -#define FREQUENCE_PMTIMER 3579545 /* Timer should run at 3.579545 MHz */ -typedef struct PMTState { - struct vcpu *vcpu; /* Keeps sync with this vcpu's guest-time */ - uint64_t last_gtime; /* Last (guest) time we updated the timer */ - uint32_t not_accounted; /* time not accounted at last update */ - uint64_t scale; /* Multiplier to get from tsc to timer ticks */ - struct timer timer; /* To make sure we send SCIs */ - spinlock_t lock; -} PMTState; - -struct pl_time { /* platform time */ - struct RTCState vrtc; - struct HPETState vhpet; - struct PMTState vpmt; - /* - * Functions which want to modify the vcpu field of the vpt need - * to hold the global lock (pt_migrate) in write mode together - * with the per-vcpu locks of the lists being modified. Functions - * that want to lock a periodic_timer that's possibly on a - * different vCPU list need to take the lock in read mode first in - * order to prevent the vcpu field of periodic_timer from - * changing. 
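If the hpet_to_ns_scale comment above is read as a fixed-point ratio scaled by 2^10 (an assumption; the exact convention is defined where the scale is computed, not in this header), the conversion it implies is:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed: scale = ns-per-HPET-tick scaled by 2^10, per the comment. */
    static uint64_t hpet_ticks_to_ns(uint64_t ticks, uint64_t scale)
    {
        return (ticks * scale) >> 10;
    }

    int main(void)
    {
        uint64_t scale = 71517; /* ~69.84ns/tick, i.e. a 14.318MHz HPET */

        /* One second's worth of ticks should give roughly 1e9 ns. */
        printf("%llu ns\n",
               (unsigned long long)hpet_ticks_to_ns(14318180, scale));
        return 0;
    }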
- * - * Note that two vcpu locks cannot be held at the same time to - * avoid a deadlock. - */ - rwlock_t pt_migrate; - /* guest_time = Xen sys time + stime_offset */ - int64_t stime_offset; - /* Ensures monotonicity in appropriate timer modes. */ - uint64_t last_guest_time; - spinlock_t pl_time_lock; - struct domain *domain; -}; - -void pt_save_timer(struct vcpu *v); -void pt_restore_timer(struct vcpu *v); -int pt_update_irq(struct vcpu *v); -struct hvm_intack; -void pt_intr_post(struct vcpu *v, struct hvm_intack intack); -void pt_migrate(struct vcpu *v); - -void pt_adjust_global_vcpu_target(struct vcpu *v); -#define pt_global_vcpu_target(d) \ - (is_hvm_domain(d) && (d)->arch.hvm.i8259_target ? \ - (d)->arch.hvm.i8259_target : \ - (d)->vcpu ? (d)->vcpu[0] : NULL) - -void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt); - -/* Is given periodic timer active? */ -#define pt_active(pt) ((pt)->on_list || (pt)->pending_intr_nr) - -/* - * Create/destroy a periodic (or one-shot!) timer. - * The given periodic timer structure must be initialised with zero bytes, - * except for the 'source' field which must be initialised with the - * correct PTSRC_ value. The initialised timer structure can then be passed - * to {create,destroy}_periodic_time() any number of times and in any order. - * Note that, for a given periodic timer, invocations of these functions MUST - * be serialised. - */ -void create_periodic_time( - struct vcpu *v, struct periodic_time *pt, uint64_t delta, - uint64_t period, uint8_t irq, time_cb *cb, void *data, bool level); -void destroy_periodic_time(struct periodic_time *pt); - -int pv_pit_handler(int port, int data, int write); -void pit_reset(struct domain *d); - -void pit_init(struct domain *d, unsigned long cpu_khz); -void pit_stop_channel0_irq(PITState * pit); -void pit_deinit(struct domain *d); -void rtc_init(struct domain *d); -void rtc_migrate_timers(struct vcpu *v); -void rtc_deinit(struct domain *d); -void rtc_reset(struct domain *d); -void rtc_update_clock(struct domain *d); - -void pmtimer_init(struct vcpu *v); -void pmtimer_deinit(struct domain *d); -void pmtimer_reset(struct domain *d); -int pmtimer_change_ioport(struct domain *d, uint64_t version); - -void hpet_init(struct domain *d); -void hpet_deinit(struct domain *d); -void hpet_reset(struct domain *d); - -#endif /* __ASM_X86_HVM_VPT_H__ */ diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h deleted file mode 100644 index 5d394d4923..0000000000 --- a/xen/include/asm-x86/hypercall.h +++ /dev/null @@ -1,198 +0,0 @@ -/****************************************************************************** - * asm-x86/hypercall.h - */ - -#ifndef __ASM_X86_HYPERCALL_H__ -#define __ASM_X86_HYPERCALL_H__ - -#include -#include -#include -#include /* for do_mca */ -#include - -typedef unsigned long hypercall_fn_t( - unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long); - -typedef struct { - hypercall_fn_t *native; -#ifdef CONFIG_PV32 - hypercall_fn_t *compat; -#endif -} pv_hypercall_table_t; - -typedef struct { - uint8_t native; -#ifdef CONFIG_COMPAT - uint8_t compat; -#endif -} hypercall_args_t; - -extern const hypercall_args_t hypercall_args_table[NR_hypercalls]; - -#ifdef CONFIG_PV -extern const pv_hypercall_table_t pv_hypercall_table[]; -void pv_hypercall(struct cpu_user_regs *regs); -#endif - -void pv_ring1_init_hypercall_page(void *ptr); -void pv_ring3_init_hypercall_page(void *ptr); - -/* - * Both do_mmuext_op() and do_mmu_update(): - * We steal the 
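A sketch of a caller honouring the initialisation contract spelled out above for create_periodic_time() (the callback, period, and ISA IRQ are placeholders; MILLISECS() is assumed from xen/time.h):

    static void tick_cb(struct vcpu *v, void *opaque) { /* ... */ }

    static void arm_tick(struct vcpu *v, struct periodic_time *pt)
    {
        memset(pt, 0, sizeof(*pt)); /* "initialised with zero bytes" */
        pt->source = PTSRC_isa;     /* the one field set up front */

        create_periodic_time(v, pt, 0 /* delta */, MILLISECS(1) /* period */,
                             0 /* irq */, tick_cb, NULL, false /* level */);
    }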
m.s.b. of the @count parameter to indicate whether this - * invocation of do_mmu_update() is resuming a previously preempted call. - */ -#define MMU_UPDATE_PREEMPTED (~(~0U>>1)) - -extern long -do_event_channel_op_compat( - XEN_GUEST_HANDLE_PARAM(evtchn_op_t) uop); - -/* Legacy hypercall (as of 0x00030202). */ -extern long do_physdev_op_compat( - XEN_GUEST_HANDLE(physdev_op_t) uop); - -/* Legacy hypercall (as of 0x00030101). */ -extern long do_sched_op_compat( - int cmd, unsigned long arg); - -extern long -do_set_trap_table( - XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps); - -extern long -do_mmu_update( - XEN_GUEST_HANDLE_PARAM(mmu_update_t) ureqs, - unsigned int count, - XEN_GUEST_HANDLE_PARAM(uint) pdone, - unsigned int foreigndom); - -extern long -do_set_gdt( - XEN_GUEST_HANDLE_PARAM(xen_ulong_t) frame_list, - unsigned int entries); - -extern long -do_stack_switch( - unsigned long ss, - unsigned long esp); - -extern long -do_fpu_taskswitch( - int set); - -extern long -do_set_debugreg( - int reg, - unsigned long value); - -extern unsigned long -do_get_debugreg( - int reg); - -extern long -do_update_descriptor( - uint64_t gaddr, seg_desc_t desc); - -extern long -do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc); - -extern long -do_update_va_mapping( - unsigned long va, - u64 val64, - unsigned long flags); - -extern long -do_physdev_op( - int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); - -extern long -do_update_va_mapping_otherdomain( - unsigned long va, - u64 val64, - unsigned long flags, - domid_t domid); - -extern long -do_mmuext_op( - XEN_GUEST_HANDLE_PARAM(mmuext_op_t) uops, - unsigned int count, - XEN_GUEST_HANDLE_PARAM(uint) pdone, - unsigned int foreigndom); - -extern long do_callback_op( - int cmd, XEN_GUEST_HANDLE_PARAM(const_void) arg); - -extern unsigned long -do_iret( - void); - -extern long -do_set_callbacks( - unsigned long event_address, - unsigned long failsafe_address, - unsigned long syscall_address); - -extern long -do_set_segment_base( - unsigned int which, - unsigned long base); - -#ifdef CONFIG_COMPAT - -#include -#include - -extern int -compat_physdev_op( - int cmd, - XEN_GUEST_HANDLE_PARAM(void) arg); - -extern int -arch_compat_vcpu_op( - int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg); - -extern int compat_mmuext_op( - XEN_GUEST_HANDLE_PARAM(void) arg, - unsigned int count, - XEN_GUEST_HANDLE_PARAM(uint) pdone, - unsigned int foreigndom); - -extern int compat_platform_op( - XEN_GUEST_HANDLE_PARAM(void) u_xenpf_op); - -extern long compat_callback_op( - int cmd, XEN_GUEST_HANDLE(void) arg); - -extern int compat_update_va_mapping( - unsigned int va, u32 lo, u32 hi, unsigned int flags); - -extern int compat_update_va_mapping_otherdomain( - unsigned int va, u32 lo, u32 hi, unsigned int flags, domid_t domid); - -DEFINE_XEN_GUEST_HANDLE(trap_info_compat_t); -extern int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps); - -extern int compat_set_gdt( - XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries); - -extern int compat_update_descriptor( - u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi); - -extern unsigned int compat_iret(void); - -extern int compat_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); - -extern long compat_set_callbacks( - unsigned long event_selector, unsigned long event_address, - unsigned long failsafe_selector, unsigned long failsafe_address); - -DEFINE_XEN_GUEST_HANDLE(physdev_op_compat_t); -extern int compat_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_compat_t) uop); - -#endif /* CONFIG_COMPAT */ - 
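The m.s.b. trick described at the top of this header can be demonstrated standalone; the "remaining count" encoding below is one reading of how continuations use it, not code from the patch:

    #include <stdio.h>

    #define MMU_UPDATE_PREEMPTED (~(~0U >> 1)) /* 0x80000000 */

    int main(void)
    {
        unsigned int count = 1000, done = 400;
        unsigned int resumed = (count - done) | MMU_UPDATE_PREEMPTED;

        /* Prints: preempted: yes, remaining: 600 */
        printf("preempted: %s, remaining: %u\n",
               (resumed & MMU_UPDATE_PREEMPTED) ? "yes" : "no",
               resumed & ~MMU_UPDATE_PREEMPTED);
        return 0;
    }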
-#endif /* __ASM_X86_HYPERCALL_H__ */ diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h deleted file mode 100644 index a783549db9..0000000000 --- a/xen/include/asm-x86/i387.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * include/asm-i386/i387.h - * - * Copyright (C) 1994 Linus Torvalds - * - * Pentium III FXSR, SSE support - * General FPU state handling cleanups - * Gareth Hughes , May 2000 - */ - -#ifndef __ASM_I386_I387_H -#define __ASM_I386_I387_H - -#include - -/* Byte offset of the stored word size within the FXSAVE area/portion. */ -#define FPU_WORD_SIZE_OFFSET 511 - -struct ix87_env { - uint16_t fcw, _res0; - uint16_t fsw, _res1; - uint16_t ftw, _res2; - uint32_t fip; - uint16_t fcs; - uint16_t fop; - uint32_t fdp; - uint16_t fds, _res6; -}; - -void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts); -void vcpu_restore_fpu_lazy(struct vcpu *v); -void vcpu_save_fpu(struct vcpu *v); -void save_fpu_enable(void); - -int vcpu_init_fpu(struct vcpu *v); -struct xsave_struct; -void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area, - const void *data, unsigned int fcw_default); -void vcpu_destroy_fpu(struct vcpu *v); -#endif /* __ASM_I386_I387_H */ diff --git a/xen/include/asm-x86/init.h b/xen/include/asm-x86/init.h deleted file mode 100644 index 5295b35e63..0000000000 --- a/xen/include/asm-x86/init.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _XEN_ASM_INIT_H -#define _XEN_ASM_INIT_H - -#endif /* _XEN_ASM_INIT_H */ diff --git a/xen/include/asm-x86/invpcid.h b/xen/include/asm-x86/invpcid.h deleted file mode 100644 index bf5c30313a..0000000000 --- a/xen/include/asm-x86/invpcid.h +++ /dev/null @@ -1,67 +0,0 @@ -#ifndef _ASM_X86_INVPCID_H_ -#define _ASM_X86_INVPCID_H_ - -#include - -extern bool use_invpcid; - -#define INVPCID_OPCODE ".byte 0x66, 0x0f, 0x38, 0x82\n" -#define MODRM_ECX_01 ".byte 0x01\n" - -static inline void invpcid(unsigned int pcid, unsigned long addr, - unsigned int type) -{ - struct { - uint64_t pcid:12; - uint64_t reserved:52; - uint64_t addr; - } desc = { .pcid = pcid, .addr = addr }; - - asm volatile ( -#ifdef HAVE_AS_INVPCID - "invpcid %[desc], %q[type]" - : /* No output */ - : [desc] "m" (desc), [type] "r" (type) -#else - INVPCID_OPCODE MODRM_ECX_01 - : /* No output */ - : "a" (type), "c" (&desc) -#endif - : "memory" ); -} - -/* Flush all mappings for a given PCID and addr, not including globals */ -static inline void invpcid_flush_one(unsigned int pcid, unsigned long addr) -{ - invpcid(pcid, addr, X86_INVPCID_INDIV_ADDR); -} - -/* Flush all mappings for a given PCID, not including globals */ -static inline void invpcid_flush_single_context(unsigned int pcid) -{ - invpcid(pcid, 0, X86_INVPCID_SINGLE_CTXT); -} - -/* Flush all mappings, including globals, for all PCIDs */ -static inline void invpcid_flush_all(void) -{ - invpcid(0, 0, X86_INVPCID_ALL_INCL_GLOBAL); -} - -/* Flush all mappings for all PCIDs, excluding globals */ -static inline void invpcid_flush_all_nonglobals(void) -{ - invpcid(0, 0, X86_INVPCID_ALL_NON_GLOBAL); -} - -#endif /* _ASM_X86_INVPCID_H_ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/io.h b/xen/include/asm-x86/io.h deleted file mode 100644 index 92b784a861..0000000000 --- a/xen/include/asm-x86/io.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef _ASM_IO_H -#define _ASM_IO_H - -#include -#include - -#define readb(x) (*(volatile uint8_t *)(x)) -#define readw(x) (*(volatile uint16_t *)(x)) -#define 
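The descriptor embedded in invpcid() above is a 128-bit in-memory operand; a freestanding restatement with a size check (C11 static_assert; the field layout is copied from the header):

    #include <assert.h>
    #include <stdint.h>

    struct invpcid_desc {
        uint64_t pcid:12;
        uint64_t reserved:52;
        uint64_t addr;
    };

    int main(void)
    {
        static_assert(sizeof(struct invpcid_desc) == 16,
                      "INVPCID consumes a 16-byte descriptor");

        struct invpcid_desc desc = { .pcid = 5, .addr = 0x1000 };

        (void)desc; /* invpcid() builds exactly this on its stack */
        return 0;
    }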
readl(x) (*(volatile uint32_t *)(x)) -#define readq(x) (*(volatile uint64_t *)(x)) -#define writeb(d,x) (*(volatile uint8_t *)(x) = (d)) -#define writew(d,x) (*(volatile uint16_t *)(x) = (d)) -#define writel(d,x) (*(volatile uint32_t *)(x) = (d)) -#define writeq(d,x) (*(volatile uint64_t *)(x) = (d)) - -#define __OUT1(s,x) \ -static inline void out##s(unsigned x value, unsigned short port) { - -#define __OUT2(s,s1,s2) \ -__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" - -#define __OUT(s,s1,x) \ -__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ -__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port));} - -#define __IN1(s) \ -static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; - -#define __IN2(s,s1,s2) \ -__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" - -#define __IN(s,s1,i...) \ -__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ -__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } - -#define RETURN_TYPE unsigned char -__IN(b,"") -#undef RETURN_TYPE -#define RETURN_TYPE unsigned short -__IN(w,"") -#undef RETURN_TYPE -#define RETURN_TYPE unsigned int -__IN(l,"") -#undef RETURN_TYPE - -__OUT(b,"b",char) -__OUT(w,"w",short) -__OUT(l,,int) - -/* Function pointer used to handle platform specific I/O port emulation. */ -#define IOEMUL_QUIRK_STUB_BYTES 9 -struct cpu_user_regs; -extern unsigned int (*ioemul_handle_quirk)( - u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs); - -#endif diff --git a/xen/include/asm-x86/io_apic.h b/xen/include/asm-x86/io_apic.h deleted file mode 100644 index ef0878b09e..0000000000 --- a/xen/include/asm-x86/io_apic.h +++ /dev/null @@ -1,212 +0,0 @@ -#ifndef __ASM_IO_APIC_H -#define __ASM_IO_APIC_H - -#include -#include -#include -#include -#include - -/* - * Intel IO-APIC support for SMP and UP systems. 
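The __OUT/__IN token pasting above is hard to read in flattened form; for reference, what __OUT(b,"b",char) and __IN(b,"") expand to (hand-expanded, whitespace added; the _p variants are identical apart from the name):

    static inline void outb(unsigned char value, unsigned short port)
    {
        __asm__ __volatile__ ( "outb %b0,%w1" : : "a" (value), "Nd" (port) );
    }

    static inline unsigned char inb(unsigned short port)
    {
        unsigned char _v;

        __asm__ __volatile__ ( "inb %w1,%0" : "=a" (_v) : "Nd" (port) );
        return _v;
    }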
- * - * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar - */ - -#define IO_APIC_BASE(idx) \ - ((volatile uint32_t *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + (idx)) \ - + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) - -#define IO_APIC_ID(idx) (mp_ioapics[idx].mpc_apicid) - -/* I/O Unit Redirection Table */ -#define IO_APIC_REDIR_VECTOR_MASK 0x000FF -#define IO_APIC_REDIR_DELIV_MODE_MASK 0x00700 -#define IO_APIC_REDIR_DEST_LOGICAL 0x00800 -#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000 -#define IO_APIC_REDIR_SEND_PENDING (1 << 12) -#define IO_APIC_REDIR_REMOTE_IRR (1 << 14) -#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) -#define IO_APIC_REDIR_MASKED (1 << 16) - -/* - * The structure of the IO-APIC: - */ -union IO_APIC_reg_00 { - uint32_t raw; - struct { - unsigned int __reserved_2:14; - unsigned int LTS:1; - unsigned int delivery_type:1; - unsigned int __reserved_1:8; - unsigned int ID:8; - } bits; -}; - -union IO_APIC_reg_01 { - uint32_t raw; - struct { - unsigned int version:8; - unsigned int __reserved_2:7; - unsigned int PRQ:1; - unsigned int entries:8; - unsigned int __reserved_1:8; - } bits; -}; - -union IO_APIC_reg_02 { - uint32_t raw; - struct { - unsigned int __reserved_2:24; - unsigned int arbitration:4; - unsigned int __reserved_1:4; - } bits; -}; - -union IO_APIC_reg_03 { - uint32_t raw; - struct { - unsigned int boot_DT:1; - unsigned int __reserved_1:31; - } bits; -}; - -/* - * # of IO-APICs and # of IRQ routing registers - */ -extern int nr_ioapics; -extern int nr_ioapic_entries[MAX_IO_APICS]; - -enum ioapic_irq_destination_types { - dest_Fixed = 0, - dest_LowestPrio = 1, - dest_SMI = 2, - dest__reserved_1 = 3, - dest_NMI = 4, - dest_INIT = 5, - dest__reserved_2 = 6, - dest_ExtINT = 7 -}; - -struct IO_APIC_route_entry { - unsigned int vector:8; - unsigned int delivery_mode:3; /* - * 000: FIXED - * 001: lowest prio - * 111: ExtINT - */ - unsigned int dest_mode:1; /* 0: physical, 1: logical */ - unsigned int delivery_status:1; - unsigned int polarity:1; /* 0: low, 1: high */ - unsigned int irr:1; - unsigned int trigger:1; /* 0: edge, 1: level */ - unsigned int mask:1; /* 0: enabled, 1: disabled */ - unsigned int __reserved_2:15; - - union { - struct { - unsigned int __reserved_1:24; - unsigned int physical_dest:4; - unsigned int __reserved_2:4; - } physical; - - struct { - unsigned int __reserved_1:24; - unsigned int logical_dest:8; - } logical; - - /* used when Interrupt Remapping with EIM is enabled */ - unsigned int dest32; - } dest; -}; - -/* - * MP-BIOS irq configuration table structures: - */ - -/* I/O APIC entries */ -extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; - -/* Base GSI for this IO APIC */ -unsigned int io_apic_gsi_base(unsigned int apic); - -/* Only need to remap ioapic RTE (reg: 10~3Fh) */ -#define ioapic_reg_remapped(reg) (iommu_intremap && ((reg) >= 0x10)) - -static inline unsigned int __io_apic_read(unsigned int apic, unsigned int reg) -{ - volatile uint32_t *regs = IO_APIC_BASE(apic); - - regs[0] = reg; - return regs[4]; -} - -static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) -{ - if ( ioapic_reg_remapped(reg) ) - return iommu_read_apic_from_ire(apic, reg); - return __io_apic_read(apic, reg); -} - -static inline void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) -{ - volatile uint32_t *regs = IO_APIC_BASE(apic); - - regs[0] = reg; - regs[4] = value; -} - -static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) -{ - if ( ioapic_reg_remapped(reg) ) - return 
iommu_update_ire_from_apic(apic, reg, value); - __io_apic_write(apic, reg, value); -} - -/* - * Re-write a value: to be used for read-modify-write - * cycles where the read already set up the index register. - */ -static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) -{ - if ( ioapic_reg_remapped(reg) ) - return iommu_update_ire_from_apic(apic, reg, value); - *(IO_APIC_BASE(apic) + 4) = value; -} - -/* 1 if "noapic" boot option passed */ -extern bool skip_ioapic_setup; -extern bool ioapic_ack_new; -extern bool ioapic_ack_forced; - -extern int io_apic_get_unique_id (int ioapic, int apic_id); -extern int io_apic_get_version (int ioapic); -extern int io_apic_get_redir_entries (int ioapic); -extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); - -extern void ioapic_init(void); - -extern void ioapic_suspend(void); -extern void ioapic_resume(void); - -extern void dump_ioapic_irq_info(void); - -extern struct IO_APIC_route_entry __ioapic_read_entry( - unsigned int apic, unsigned int pin, bool raw); -void __ioapic_write_entry( - unsigned int apic, unsigned int pin, bool raw, - struct IO_APIC_route_entry); - -extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); -extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); -extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); -extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); -extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries, - bool raw); - -unsigned highest_gsi(void); - -int ioapic_guest_read( unsigned long physbase, unsigned int reg, u32 *pval); -int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 pval); - -#endif diff --git a/xen/include/asm-x86/iocap.h b/xen/include/asm-x86/iocap.h deleted file mode 100644 index eee47228d4..0000000000 --- a/xen/include/asm-x86/iocap.h +++ /dev/null @@ -1,21 +0,0 @@ -/****************************************************************************** - * iocap.h - * - * Architecture-specific per-domain I/O capabilities. - */ - -#ifndef __X86_IOCAP_H__ -#define __X86_IOCAP_H__ - -#define ioports_permit_access(d, s, e) \ - rangeset_add_range((d)->arch.ioport_caps, s, e) -#define ioports_deny_access(d, s, e) \ - rangeset_remove_range((d)->arch.ioport_caps, s, e) -#define ioports_access_permitted(d, s, e) \ - rangeset_contains_range((d)->arch.ioport_caps, s, e) - -#define cache_flush_permitted(d) \ - (!rangeset_is_empty((d)->iomem_caps) || \ - !rangeset_is_empty((d)->arch.ioport_caps)) - -#endif /* __X86_IOCAP_H__ */ diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h deleted file mode 100644 index de46149b40..0000000000 --- a/xen/include/asm-x86/iommu.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . 
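Since the IO-APIC is indirectly addressed (regs[0] is the index register and regs[4] the data window, i.e. byte offsets 0x00 and 0x10 given the uint32_t pointer), io_apic_modify() only makes sense straight after a read; a sketch of that pattern (the helper itself is hypothetical, the RTE arithmetic is the standard one):

    /* Illustrative: mask redirection entry 'pin' (low dword is reg 0x10+2*pin). */
    static void mask_rte_low(unsigned int apic, unsigned int pin)
    {
        unsigned int reg = 0x10 + 2 * pin;
        unsigned int v = io_apic_read(apic, reg); /* leaves the index set */

        io_apic_modify(apic, reg, v | IO_APIC_REDIR_MASKED);
    }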
-*/ -#ifndef __ARCH_X86_IOMMU_H__ -#define __ARCH_X86_IOMMU_H__ - -#include -#include -#include -#include -#include -#include -#include - -#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 - -struct g2m_ioport { - struct list_head list; - unsigned int gport; - unsigned int mport; - unsigned int np; -}; - -#define IOMMU_PAGE_SHIFT 12 -#define IOMMU_PAGE_SIZE (1 << IOMMU_PAGE_SHIFT) -#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1)) - -typedef uint64_t daddr_t; - -#define __dfn_to_daddr(dfn) ((daddr_t)(dfn) << IOMMU_PAGE_SHIFT) -#define __daddr_to_dfn(daddr) ((daddr) >> IOMMU_PAGE_SHIFT) - -#define dfn_to_daddr(dfn) __dfn_to_daddr(dfn_x(dfn)) -#define daddr_to_dfn(daddr) _dfn(__daddr_to_dfn(daddr)) - -struct arch_iommu -{ - spinlock_t mapping_lock; /* io page table lock */ - struct { - struct page_list_head list; - spinlock_t lock; - } pgtables; - - struct list_head identity_maps; - - union { - /* Intel VT-d */ - struct { - uint64_t pgd_maddr; /* io page directory machine address */ - unsigned int agaw; /* adjusted guest address width, 0 is level 2 30-bit */ - unsigned long *iommu_bitmap; /* bitmap of iommu(s) that the domain uses */ - } vtd; - /* AMD IOMMU */ - struct { - unsigned int paging_mode; - struct page_info *root_table; - struct guest_iommu *g_iommu; - } amd; - }; -}; - -extern struct iommu_ops iommu_ops; - -#ifdef NDEBUG -# include -# define iommu_call(ops, fn, args...) ({ \ - (void)(ops); \ - alternative_call(iommu_ops.fn, ## args); \ -}) - -# define iommu_vcall(ops, fn, args...) ({ \ - (void)(ops); \ - alternative_vcall(iommu_ops.fn, ## args); \ -}) -#endif - -static inline const struct iommu_ops *iommu_get_ops(void) -{ - BUG_ON(!iommu_ops.init); - return &iommu_ops; -} - -struct iommu_init_ops { - const struct iommu_ops *ops; - int (*setup)(void); - bool (*supports_x2apic)(void); -}; - -extern const struct iommu_init_ops *iommu_init_ops; - -void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value); -unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg); -int iommu_setup_hpet_msi(struct msi_desc *); - -static inline int iommu_adjust_irq_affinities(void) -{ - return iommu_ops.adjust_irq_affinities - ? iommu_ops.adjust_irq_affinities() - : 0; -} - -static inline bool iommu_supports_x2apic(void) -{ - return iommu_init_ops && iommu_init_ops->supports_x2apic - ? 
iommu_init_ops->supports_x2apic() - : false; -} - -int iommu_enable_x2apic(void); - -static inline void iommu_disable_x2apic(void) -{ - if ( x2apic_enabled && iommu_ops.disable_x2apic ) - iommu_ops.disable_x2apic(); -} - -int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma, - paddr_t base, paddr_t end, - unsigned int flag); -void iommu_identity_map_teardown(struct domain *d); - -extern bool untrusted_msi; - -int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq, - const uint8_t gvec); - -#define iommu_sync_cache(addr, size) ({ \ - const struct iommu_ops *ops = iommu_get_ops(); \ - \ - if ( ops->sync_cache ) \ - iommu_vcall(ops, sync_cache, addr, size); \ -}) - -int __must_check iommu_free_pgtables(struct domain *d); -struct page_info *__must_check iommu_alloc_pgtable(struct domain *d); - -#endif /* !__ARCH_X86_IOMMU_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/ioreq.h b/xen/include/asm-x86/ioreq.h deleted file mode 100644 index d06ce9a6ea..0000000000 --- a/xen/include/asm-x86/ioreq.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * ioreq.h: Hardware virtual machine assist interface definitions. - * - * This is a wrapper which purpose is to not include arch HVM specific header - * from the common code. - * - * Copyright (c) 2016 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_IOREQ_H__ -#define __ASM_X86_IOREQ_H__ - -#ifdef CONFIG_HVM -#include -#endif - -#endif /* __ASM_X86_IOREQ_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h deleted file mode 100644 index 7c825e9d9c..0000000000 --- a/xen/include/asm-x86/irq.h +++ /dev/null @@ -1,221 +0,0 @@ -#ifndef _ASM_HW_IRQ_H -#define _ASM_HW_IRQ_H - -/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */ - -#include -#include -#include -#include -#include -#include - -extern unsigned int nr_irqs_gsi; -extern unsigned int nr_irqs; -#define nr_static_irqs nr_irqs_gsi - -#define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ? \ - (1 << (irq)) & io_apic_irqs : \ - (irq) < nr_irqs_gsi) - -#define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs) - -#define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR) - -typedef struct { - DECLARE_BITMAP(_bits, X86_NR_VECTORS); -} vmask_t; - -struct irq_desc; - -struct arch_irq_desc { - s16 vector; /* vector itself is only 8 bits, */ - s16 old_vector; /* but we use -1 for unassigned */ - /* - * Except for high priority interrupts @cpu_mask may have bits set for - * offline CPUs. Consumers need to be careful to mask this down to - * online ones as necessary. There is supposed to always be a non- - * empty intersection with cpu_online_map. 
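The daddr_t conversion helpers in the iommu header above are plain shifts; a freestanding check (raw integers stand in for the typesafe dfn wrappers):

    #include <stdint.h>
    #include <stdio.h>

    #define IOMMU_PAGE_SHIFT 12

    typedef uint64_t daddr_t;

    int main(void)
    {
        uint64_t dfn = 0x1234; /* device frame number, sans wrapper type */
        daddr_t daddr = (daddr_t)dfn << IOMMU_PAGE_SHIFT;

        /* Prints: daddr 0x1234000 -> dfn 0x1234 */
        printf("daddr %#llx -> dfn %#llx\n", (unsigned long long)daddr,
               (unsigned long long)(daddr >> IOMMU_PAGE_SHIFT));
        return 0;
    }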
- */
-    cpumask_var_t cpu_mask;
-    cpumask_var_t old_cpu_mask;
-    cpumask_var_t pending_mask;
-    vmask_t *used_vectors;
-    unsigned move_cleanup_count;
-    u8 move_in_progress : 1;
-    s8 used;
-    /*
-     * Weak reference to domain having permission over this IRQ (which can
-     * be different from the domain actually having the IRQ assigned)
-     */
-    domid_t creator_domid;
-};
-
-/* For use with irq_desc.arch.used */
-#define IRQ_UNUSED (0)
-#define IRQ_USED (1)
-#define IRQ_RESERVED (-1)
-
-#define IRQ_VECTOR_UNASSIGNED (-1)
-
-typedef int vector_irq_t[X86_NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
-
-extern bool opt_noirqbalance;
-
-#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */
-#define OPT_IRQ_VECTOR_MAP_NONE 1 /* None */
-#define OPT_IRQ_VECTOR_MAP_GLOBAL 2 /* One global vector map (no vector sharing) */
-#define OPT_IRQ_VECTOR_MAP_PERDEV 3 /* Per-device vector map (no vector sharing w/in a device) */
-
-extern int opt_irq_vector_map;
-
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack
- */
-DECLARE_PER_CPU(struct cpu_user_regs *, __irq_regs);
-
-static inline struct cpu_user_regs *get_irq_regs(void)
-{
-    return this_cpu(__irq_regs);
-}
-
-static inline struct cpu_user_regs *set_irq_regs(struct cpu_user_regs *new_regs)
-{
-    struct cpu_user_regs *old_regs, **pp_regs = &this_cpu(__irq_regs);
-
-    old_regs = *pp_regs;
-    *pp_regs = new_regs;
-    return old_regs;
-}
-
-
-#define platform_legacy_irq(irq) ((irq) < 16)
-
-void event_check_interrupt(struct cpu_user_regs *regs);
-void invalidate_interrupt(struct cpu_user_regs *regs);
-void call_function_interrupt(struct cpu_user_regs *regs);
-void apic_timer_interrupt(struct cpu_user_regs *regs);
-void error_interrupt(struct cpu_user_regs *regs);
-void pmu_apic_interrupt(struct cpu_user_regs *regs);
-void spurious_interrupt(struct cpu_user_regs *regs);
-void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
-
-uint8_t alloc_hipriority_vector(void);
-
-void set_direct_apic_vector(
-    uint8_t vector, void (*handler)(struct cpu_user_regs *));
-void alloc_direct_apic_vector(
-    uint8_t *vector, void (*handler)(struct cpu_user_regs *));
-
-void do_IRQ(struct cpu_user_regs *regs);
-
-void disable_8259A_irq(struct irq_desc *);
-void enable_8259A_irq(struct irq_desc *);
-int i8259A_irq_pending(unsigned int irq);
-void mask_8259A(void);
-void unmask_8259A(void);
-void init_8259A(int aeoi);
-void make_8259A_irq(unsigned int irq);
-bool bogus_8259A_irq(unsigned int irq);
-int i8259A_suspend(void);
-int i8259A_resume(void);
-
-void setup_IO_APIC(void);
-void disable_IO_APIC(void);
-void setup_ioapic_dest(void);
-vmask_t *io_apic_get_used_vector_map(unsigned int irq);
-
-extern unsigned int io_apic_irqs;
-
-DECLARE_PER_CPU(unsigned int, irq_count);
-
-struct pirq;
-struct arch_pirq {
-    int irq;
-    union {
-        struct hvm_pirq {
-            int emuirq;
-            struct hvm_pirq_dpci dpci;
-        } hvm;
-    };
-};
-
-#define pirq_dpci(pirq) ((pirq) ?
&(pirq)->arch.hvm.dpci : NULL) -#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci) - -int pirq_shared(struct domain *d , int irq); - -int map_domain_pirq(struct domain *d, int pirq, int irq, int type, - void *data); -int unmap_domain_pirq(struct domain *d, int pirq); -int get_free_pirq(struct domain *d, int type); -int get_free_pirqs(struct domain *, unsigned int nr); -void free_domain_pirqs(struct domain *d); -int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq); -int unmap_domain_pirq_emuirq(struct domain *d, int pirq); - -/* Reset irq affinities to match the given CPU mask. */ -void fixup_irqs(const cpumask_t *mask, bool verbose); -void fixup_eoi(void); - -int init_irq_data(void); - -void clear_irq_vector(int irq); - -int irq_to_vector(int irq); -/* - * If grant_access is set the current domain is given permissions over - * the created IRQ. - */ -int create_irq(nodeid_t node, bool grant_access); -void destroy_irq(unsigned int irq); -int assign_irq_vector(int irq, const cpumask_t *); - -extern void irq_complete_move(struct irq_desc *); - -extern struct irq_desc *irq_desc; - -void lock_vector_lock(void); -void unlock_vector_lock(void); - -void setup_vector_irq(unsigned int cpu); - -void move_native_irq(struct irq_desc *); -void move_masked_irq(struct irq_desc *); - -int bind_irq_vector(int irq, int vector, const cpumask_t *); - -void end_nonmaskable_irq(struct irq_desc *, uint8_t vector); -void irq_set_affinity(struct irq_desc *, const cpumask_t *mask); - -int init_domain_irq_mapping(struct domain *); -void cleanup_domain_irq_mapping(struct domain *); - -#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq, 0) -#define domain_irq_to_pirq(d, irq) ({ \ - void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq); \ - __ret ? radix_tree_ptr_to_int(__ret) : 0; \ -}) -#define PIRQ_ALLOCATED -1 -#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq, \ - arch.hvm.emuirq, IRQ_UNBOUND) -#define domain_emuirq_to_pirq(d, emuirq) ({ \ - void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\ - __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND; \ -}) -#define IRQ_UNBOUND -1 -#define IRQ_PT -2 -#define IRQ_MSI_EMU -3 - -bool cpu_has_pending_apic_eoi(void); - -static inline void arch_move_irqs(struct vcpu *v) { } - -struct msi_info; -int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p); -int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p, - int type, struct msi_info *msi); - -#endif /* _ASM_HW_IRQ_H */ diff --git a/xen/include/asm-x86/ldt.h b/xen/include/asm-x86/ldt.h deleted file mode 100644 index 58e3e042fc..0000000000 --- a/xen/include/asm-x86/ldt.h +++ /dev/null @@ -1,35 +0,0 @@ - -#ifndef __ARCH_LDT_H -#define __ARCH_LDT_H - -#ifndef __ASSEMBLY__ - -static inline void load_LDT(struct vcpu *v) -{ - seg_desc_t *desc; - unsigned long ents; - - if ( (ents = v->arch.pv.ldt_ents) == 0 ) - lldt(0); - else - { - desc = (!is_pv_32bit_vcpu(v) ? 
this_cpu(gdt) : this_cpu(compat_gdt)) - + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY; - _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt); - lldt(LDT_SELECTOR); - } -} - -#endif /* !__ASSEMBLY__ */ - -#endif - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/livepatch.h b/xen/include/asm-x86/livepatch.h deleted file mode 100644 index 00aefd2d63..0000000000 --- a/xen/include/asm-x86/livepatch.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. - * - */ - -#ifndef __XEN_X86_LIVEPATCH_H__ -#define __XEN_X86_LIVEPATCH_H__ - -#include /* For SZ_* macros. */ - -#define ARCH_PATCH_INSN_SIZE 5 -#define ARCH_LIVEPATCH_RANGE SZ_2G -#define LIVEPATCH_FEATURE X86_FEATURE_ALWAYS - -#endif /* __XEN_X86_LIVEPATCH_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/mach-default/bios_ebda.h b/xen/include/asm-x86/mach-default/bios_ebda.h deleted file mode 100644 index 42de6b2a5b..0000000000 --- a/xen/include/asm-x86/mach-default/bios_ebda.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _MACH_BIOS_EBDA_H -#define _MACH_BIOS_EBDA_H - -/* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E. - */ -static inline unsigned int get_bios_ebda(void) -{ - unsigned int address = *(unsigned short *)maddr_to_virt(0x40E); - address <<= 4; - return address; /* 0 means none */ -} - -#endif /* _MACH_BIOS_EBDA_H */ diff --git a/xen/include/asm-x86/mach-default/io_ports.h b/xen/include/asm-x86/mach-default/io_ports.h deleted file mode 100644 index a96d9f6604..0000000000 --- a/xen/include/asm-x86/mach-default/io_ports.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * arch/i386/mach-generic/io_ports.h - * - * Machine specific IO port address definition for generic. - * Written by Osamu Tomita - */ -#ifndef _MACH_IO_PORTS_H -#define _MACH_IO_PORTS_H - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 -#define PIT_CH2 0x42 - -/* i8259A PIC registers */ -#define PIC_MASTER_CMD 0x20 -#define PIC_MASTER_IMR 0x21 -#define PIC_MASTER_ISR PIC_MASTER_CMD -#define PIC_MASTER_POLL PIC_MASTER_ISR -#define PIC_MASTER_OCW3 PIC_MASTER_ISR -#define PIC_SLAVE_CMD 0xa0 -#define PIC_SLAVE_IMR 0xa1 - -/* i8259A PIC related value */ -#define PIC_CASCADE_IR 2 -#define MASTER_ICW4_DEFAULT 0x01 -#define SLAVE_ICW4_DEFAULT 0x01 -#define PIC_ICW4_AEOI 2 - -#endif /* !_MACH_IO_PORTS_H */ diff --git a/xen/include/asm-x86/mach-default/irq_vectors.h b/xen/include/asm-x86/mach-default/irq_vectors.h deleted file mode 100644 index f546aedd87..0000000000 --- a/xen/include/asm-x86/mach-default/irq_vectors.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef _ASM_IRQ_VECTORS_H -#define _ASM_IRQ_VECTORS_H - -/* Processor-initiated interrupts are all high priority. */ -#define SPURIOUS_APIC_VECTOR 0xff -#define ERROR_APIC_VECTOR 0xfe -#define INVALIDATE_TLB_VECTOR 0xfd -#define EVENT_CHECK_VECTOR 0xfc -#define CALL_FUNCTION_VECTOR 0xfb -#define LOCAL_TIMER_VECTOR 0xfa -#define PMU_APIC_VECTOR 0xf9 -/* - * High-priority dynamically-allocated vectors. For interrupts that - * must be higher priority than any guest-bound interrupt. - */ -#define FIRST_HIPRIORITY_VECTOR 0xf1 -#define LAST_HIPRIORITY_VECTOR 0xf8 -/* IRQ0 (timer) is statically allocated but must be high priority. 
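As a hedged illustration of how the constants in this header partition the vector space (sketch only; no such helper exists in the tree, and it relies on IRQ0_VECTOR and the dynamic-range constants defined alongside it):

    /* Illustrative only: 0xf9-0xff fixed APIC, 0xf1-0xf8 hi-prio dynamic,
     * 0xf0 IRQ0 (timer), 0x20-0xef dynamic/guest-bound. */
    static bool vector_is_hipriority(uint8_t vec)
    {
        return vec >= FIRST_HIPRIORITY_VECTOR; /* includes the APIC vectors */
    }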
*/ -#define IRQ0_VECTOR 0xf0 - -/* Legacy PIC uses vectors 0x20-0x2f. */ -#define FIRST_LEGACY_VECTOR FIRST_DYNAMIC_VECTOR -#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf) - -#ifdef CONFIG_PV32 -#define HYPERCALL_VECTOR 0x82 -#endif - -#define LEGACY_SYSCALL_VECTOR 0x80 - -/* - * Dynamically-allocated vectors available to any driver. Note that the - * legacy vector range is a sub-range of this one, re-used on CPUs not - * sharing vectors with CPU 0. - */ -#define FIRST_DYNAMIC_VECTOR 0x20 -#define LAST_DYNAMIC_VECTOR 0xef -#define NR_DYNAMIC_VECTORS (LAST_DYNAMIC_VECTOR - FIRST_DYNAMIC_VECTOR + 1) - -/* There's no IRQ2 at the PIC. */ -#define IRQ_MOVE_CLEANUP_VECTOR (FIRST_LEGACY_VECTOR + 2) - -#define FIRST_IRQ_VECTOR FIRST_DYNAMIC_VECTOR -#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR - -#endif /* _ASM_IRQ_VECTORS_H */ diff --git a/xen/include/asm-x86/mach-default/mach_mpspec.h b/xen/include/asm-x86/mach-default/mach_mpspec.h deleted file mode 100644 index 1a4e3f8c4f..0000000000 --- a/xen/include/asm-x86/mach-default/mach_mpspec.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -/* Generic (i.e. installer) kernels need lots of bus entries. */ -/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ -#define MAX_MP_BUSSES 260 - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/xen/include/asm-x86/mach-generic/mach_apic.h b/xen/include/asm-x86/mach-generic/mach_apic.h deleted file mode 100644 index b6f6361c60..0000000000 --- a/xen/include/asm-x86/mach-generic/mach_apic.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include -#include -#include -#include - -/* ESR was originally disabled in Linux for NUMA-Q. Do we really need to? */ -#define esr_disable (0) - -/* The following are dependent on APIC delivery mode (logical vs. physical). */ -#define INT_DELIVERY_MODE (genapic.int_delivery_mode) -#define INT_DEST_MODE (genapic.int_dest_mode) -#define TARGET_CPUS ((const typeof(cpu_online_map) *)&cpu_online_map) -#define init_apic_ldr (genapic.init_apic_ldr) -#define cpu_mask_to_apicid(mask) ({ \ - /* \ - * There are a number of places where the address of a local variable \ - * gets passed here. The use of ?: in alternative_call() triggers an \ - * "address of ... is always true" warning in such a case with at least \ - * gcc 7 and 8. Hence the seemingly pointless local variable here. \ - */ \ - const cpumask_t *m_ = (mask); \ - alternative_call(genapic.cpu_mask_to_apicid, m_); \ -}) -#define vector_allocation_cpumask(cpu) \ - alternative_call(genapic.vector_allocation_cpumask, cpu) - -static inline void enable_apic_mode(void) -{ - /* Not needed for modern ES7000 which boot in Virtual Wire mode. */ - /*es7000_sw_apic();*/ -} - -#define apicid_to_node(apicid) ((int)apicid_to_node[(u32)apicid]) - -extern u32 bios_cpu_apicid[]; - -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - -extern void generic_apic_probe(void); -extern void generic_bigsmp_probe(void); - -/* - * The following functions based around phys_cpu_present_map are disabled in - * some i386 Linux subarchitectures, and in x86_64 'cluster' genapic mode. I'm - * really not sure why, since all local APICs should have distinct physical - * IDs, and we need to know what they are. 
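A hedged sketch of how the hooks above are typically combined when resolving an interrupt destination (simplified; the real users live in io_apic.c and irq.c, and a stack cpumask_t is used here purely for illustration):

    /* Illustrative only: pick a destination APIC ID for an IRQ. */
    cpumask_t dest;
    unsigned int apicid;

    cpumask_and(&dest, desc->arch.cpu_mask, TARGET_CPUS);
    apicid = cpu_mask_to_apicid(&dest); /* honours logical/physical mode */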
- */ -static inline int apic_id_registered(void) -{ - return physid_isset(get_apic_id(), - phys_cpu_present_map); -} - -static inline void ioapic_phys_id_map(physid_mask_t *map) -{ - *map = phys_cpu_present_map; -} - -static inline int check_apicid_used(const physid_mask_t *map, int apicid) -{ - return physid_isset(apicid, *map); -} - -static inline int check_apicid_present(int apicid) -{ - return physid_isset(apicid, phys_cpu_present_map); -} - -static inline void set_apicid(int phys_apicid, physid_mask_t *map) -{ - physid_set(phys_apicid, *map); -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/xen/include/asm-x86/machine_kexec.h b/xen/include/asm-x86/machine_kexec.h deleted file mode 100644 index ba0d469d07..0000000000 --- a/xen/include/asm-x86/machine_kexec.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __X86_MACHINE_KEXEC_H__ -#define __X86_MACHINE_KEXEC_H__ - -#define KEXEC_RELOC_FLAG_COMPAT 0x1 /* 32-bit image */ - -#ifndef __ASSEMBLY__ - -extern void kexec_reloc(unsigned long reloc_code, unsigned long reloc_pt, - unsigned long ind_maddr, unsigned long entry_maddr, - unsigned long flags); - -extern unsigned int kexec_reloc_size; - -#endif - -#endif /* __X86_MACHINE_KEXEC_H__ */ diff --git a/xen/include/asm-x86/mc146818rtc.h b/xen/include/asm-x86/mc146818rtc.h deleted file mode 100644 index 803b236c0a..0000000000 --- a/xen/include/asm-x86/mc146818rtc.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _ASM_MC146818RTC_H -#define _ASM_MC146818RTC_H - -#include -#include - -extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ - -/********************************************************************** - * register summary - **********************************************************************/ -#define RTC_SECONDS 0 -#define RTC_SECONDS_ALARM 1 -#define RTC_MINUTES 2 -#define RTC_MINUTES_ALARM 3 -#define RTC_HOURS 4 -#define RTC_HOURS_ALARM 5 -/* RTC_*_alarm is always true if 2 MSBs are set */ -# define RTC_ALARM_DONT_CARE 0xC0 - -#define RTC_DAY_OF_WEEK 6 -#define RTC_DAY_OF_MONTH 7 -#define RTC_MONTH 8 -#define RTC_YEAR 9 - -/* control registers - Moto names - */ -#define RTC_REG_A 10 -#define RTC_REG_B 11 -#define RTC_REG_C 12 -#define RTC_REG_D 13 - -/********************************************************************** - * register details - **********************************************************************/ -#define RTC_FREQ_SELECT RTC_REG_A - -/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, - * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, - * totalling to a max high interval of 2.228 ms. - */ -# define RTC_UIP 0x80 -# define RTC_DIV_CTL 0x70 - /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ -# define RTC_REF_CLCK_4MHZ 0x00 -# define RTC_REF_CLCK_1MHZ 0x10 -# define RTC_REF_CLCK_32KHZ 0x20 - /* 2 values for divider stage reset, others for "testing purposes only" */ -# define RTC_DIV_RESET1 0x60 -# define RTC_DIV_RESET2 0x70 - /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 
15=2Hz */ -# define RTC_RATE_SELECT 0x0F - -/**********************************************************************/ -#define RTC_CONTROL RTC_REG_B -# define RTC_SET 0x80 /* disable updates for clock setting */ -# define RTC_PIE 0x40 /* periodic interrupt enable */ -# define RTC_AIE 0x20 /* alarm interrupt enable */ -# define RTC_UIE 0x10 /* update-finished interrupt enable */ -# define RTC_SQWE 0x08 /* enable square-wave output */ -# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ -# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ -# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ - -/**********************************************************************/ -#define RTC_INTR_FLAGS RTC_REG_C -/* caution - cleared by read */ -# define RTC_IRQF 0x80 /* any of the following 3 is active */ -# define RTC_PF 0x40 -# define RTC_AF 0x20 -# define RTC_UF 0x10 - -/**********************************************************************/ -#define RTC_VALID RTC_REG_D -# define RTC_VRT 0x80 /* valid RAM and time */ -/**********************************************************************/ - -/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) - * determines if the following two #defines are needed - */ -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... - */ -#define CMOS_READ(addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -inb_p(RTC_PORT(1)); \ -}) -#define CMOS_WRITE(val, addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -outb_p((val),RTC_PORT(1)); \ -}) - -#define RTC_IRQ 8 - -unsigned int rtc_guest_read(unsigned int port); -void rtc_guest_write(unsigned int port, unsigned int data); - -#endif /* _ASM_MC146818RTC_H */ diff --git a/xen/include/asm-x86/mce.h b/xen/include/asm-x86/mce.h deleted file mode 100644 index 2c63318c08..0000000000 --- a/xen/include/asm-x86/mce.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _XEN_X86_MCE_H -#define _XEN_X86_MCE_H - -#include -#include - -/* - * Emulate 2 banks for guest - * Bank0: reserved for 'bank0 quirk' occur at some very old processors: - * 1). Intel cpu whose family-model value < 06-1A; - * 2). 
AMD K7
- * Bank1: used to transfer error info to guest
- */
-#define GUEST_MC_BANK_NUM 2
-
-/* Filter MSCOD model specific error code to guest */
-#define MCi_STATUS_MSCOD_MASK (~(0xffffULL << 16))
-
-/* No mci_ctl since it is stuck at all 1's */
-struct vmce_bank {
-    uint64_t mci_status;
-    uint64_t mci_addr;
-    uint64_t mci_misc;
-    uint64_t mci_ctl2;
-};
-
-/* No mcg_ctl since it is not exposed to the guest */
-struct vmce {
-    uint64_t mcg_cap;
-    uint64_t mcg_status;
-    uint64_t mcg_ext_ctl;
-    spinlock_t lock;
-    struct vmce_bank bank[GUEST_MC_BANK_NUM];
-};
-
-struct domain;
-struct vcpu;
-
-/* Guest vMCE MSRs virtualization */
-extern void vmce_init_vcpu(struct vcpu *);
-extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
-extern int vmce_wrmsr(uint32_t msr, uint64_t val);
-extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
-extern bool vmce_has_lmce(const struct vcpu *v);
-extern int vmce_enable_mca_cap(struct domain *d, uint64_t cap);
-
-DECLARE_PER_CPU(unsigned int, nr_mce_banks);
-
-#endif
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
deleted file mode 100644
index 18091610ae..0000000000
--- a/xen/include/asm-x86/mem_access.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/******************************************************************************
- * include/asm-x86/mem_access.h
- *
- * Memory access support.
- *
- * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
- * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
- * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
- * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
- * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_X86_MEM_ACCESS_H__
-#define __ASM_X86_MEM_ACCESS_H__
-
-/*
- * Set up a vm_event request based on the access (gla is -1ull if not
- * available). Handles the rw2rx conversion. The boolean return value
- * indicates whether the event type is synchronous (i.e. requires a vCPU
- * pause). If req_ptr has been populated, then the caller should use
- * monitor_traps to send the event on the MONITOR ring. Once having
- * released get_gfn* locks the caller must also xfree the request.
- */
-bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
-                          struct npfec npfec,
-                          struct vm_event_st **req_ptr);
-
-/* Check for emulation and mark vcpu for skipping one instruction
- * upon rescheduling if required.
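A hedged fragment of the p2m_mem_access_check() calling contract described above (simplified from the HVM fault path; gpa, gla and npfec are assumed in scope, and monitor_traps() lives in common code):

    /* Illustrative only: forward an access fault to the monitor ring,
     * honouring the sync/async return and freeing the request afterwards. */
    vm_event_request_t *req = NULL;
    bool sync = p2m_mem_access_check(gpa, gla, npfec, &req);

    if ( req )
    {
        monitor_traps(current, sync, req); /* send on the MONITOR ring */
        xfree(req);
    }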
*/ -bool p2m_mem_access_emulate_check(struct vcpu *v, - const struct vm_event_st *rsp); - -/* Sanity check for mem_access hardware support */ -bool p2m_mem_access_sanity_check(const struct domain *d); - -int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve, - unsigned int altp2m_idx); - -struct xen_hvm_altp2m_suppress_ve_multi; -int p2m_set_suppress_ve_multi(struct domain *d, - struct xen_hvm_altp2m_suppress_ve_multi *suppress_ve); - -int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve, - unsigned int altp2m_idx); - -#endif /*__ASM_X86_MEM_ACCESS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/mem_paging.h b/xen/include/asm-x86/mem_paging.h deleted file mode 100644 index d3635e96cf..0000000000 --- a/xen/include/asm-x86/mem_paging.h +++ /dev/null @@ -1,42 +0,0 @@ -/****************************************************************************** - * include/asm-x86/mem_paging.h - * - * Memory paging support. - * - * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ - -#ifndef __ASM_X86_MEM_PAGING_H__ -#define __ASM_X86_MEM_PAGING_H__ - -int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg); - -#ifdef CONFIG_MEM_PAGING -# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging) -#else -# define mem_paging_enabled(d) false -#endif - -#endif /*__ASM_X86_MEM_PAGING_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h deleted file mode 100644 index cf7a12f4d2..0000000000 --- a/xen/include/asm-x86/mem_sharing.h +++ /dev/null @@ -1,153 +0,0 @@ -/****************************************************************************** - * include/asm-x86/mem_sharing.h - * - * Memory sharing support. - * - * Copyright (c) 2009 Citrix Systems, Inc. (Grzegorz Milos) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ -#ifndef __MEM_SHARING_H__ -#define __MEM_SHARING_H__ - -#include -#include - -#ifdef CONFIG_MEM_SHARING - -#define mem_sharing_enabled(d) ((d)->arch.hvm.mem_sharing.enabled) - -/* Auditing of memory sharing code? 
*/ -#ifndef NDEBUG -#define MEM_SHARING_AUDIT 1 -#else -#define MEM_SHARING_AUDIT 0 -#endif - -typedef uint64_t shr_handle_t; - -typedef struct rmap_hashtab { - struct list_head *bucket; - /* - * Overlaps with prev pointer of list_head in union below. - * Unlike the prev pointer, this can be NULL. - */ - void *flag; -} rmap_hashtab_t; - -struct page_sharing_info -{ - struct page_info *pg; /* Back pointer to the page. */ - shr_handle_t handle; /* Globally unique version / handle. */ -#if MEM_SHARING_AUDIT - struct list_head entry; /* List of all shared pages (entry). */ - struct rcu_head rcu_head; /* List of all shared pages (entry). */ -#endif - /* Reverse map of tuples for this shared frame. */ - union { - struct list_head gfns; - rmap_hashtab_t hash_table; - }; -}; - -unsigned int mem_sharing_get_nr_saved_mfns(void); -unsigned int mem_sharing_get_nr_shared_mfns(void); - -/* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */ -int __mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - bool destroy); - -static inline int mem_sharing_unshare_page(struct domain *d, - unsigned long gfn) -{ - int rc = __mem_sharing_unshare_page(d, gfn, false); - BUG_ON(rc && (rc != -ENOMEM)); - return rc; -} - -static inline bool mem_sharing_is_fork(const struct domain *d) -{ - return d->parent; -} - -int mem_sharing_fork_page(struct domain *d, gfn_t gfn, - bool unsharing); - -/* - * If called by a foreign domain, possible errors are - * -EBUSY -> ring full - * -ENOSYS -> no ring to begin with - * and the foreign mapper is responsible for retrying. - * - * If called by the guest vcpu itself and allow_sleep is set, may - * sleep on a wait queue, so the caller is responsible for not - * holding locks on entry. It may only fail with ENOSYS - * - * If called by the guest vcpu itself and allow_sleep is not set, - * then it's the same as a foreign domain. - */ -int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, - bool allow_sleep); -int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg); -int mem_sharing_domctl(struct domain *d, - struct xen_domctl_mem_sharing_op *mec); - -/* - * Scans the p2m and relinquishes any shared pages, destroying - * those for which this domain holds the final reference. - * Preemptible. - */ -int relinquish_shared_pages(struct domain *d); - -#else - -#define mem_sharing_enabled(d) false - -static inline unsigned int mem_sharing_get_nr_saved_mfns(void) -{ - return 0; -} - -static inline unsigned int mem_sharing_get_nr_shared_mfns(void) -{ - return 0; -} - -static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn) -{ - ASSERT_UNREACHABLE(); - return -EOPNOTSUPP; -} - -static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, - bool allow_sleep) -{ - ASSERT_UNREACHABLE(); - return -EOPNOTSUPP; -} - -static inline bool mem_sharing_is_fork(const struct domain *d) -{ - return false; -} - -static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock) -{ - return -EOPNOTSUPP; -} - -#endif - -#endif /* __MEM_SHARING_H__ */ diff --git a/xen/include/asm-x86/microcode.h b/xen/include/asm-x86/microcode.h deleted file mode 100644 index 3b0234e9fa..0000000000 --- a/xen/include/asm-x86/microcode.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef ASM_X86__MICROCODE_H -#define ASM_X86__MICROCODE_H - -#include -#include - -#include - -struct cpu_signature { - /* CPU signature (CPUID.1.EAX). */ - unsigned int sig; - - /* Platform Flags. Only applicable to Intel. 
*/ - unsigned int pf; - - /* Microcode Revision. */ - unsigned int rev; -}; - -DECLARE_PER_CPU(struct cpu_signature, cpu_sig); - -void microcode_set_module(unsigned int idx); -int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len); -int early_microcode_init(void); -int microcode_update_one(void); - -#endif /* ASM_X86__MICROCODE_H */ diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h deleted file mode 100644 index cb90527499..0000000000 --- a/xen/include/asm-x86/mm.h +++ /dev/null @@ -1,655 +0,0 @@ - -#ifndef __ASM_X86_MM_H__ -#define __ASM_X86_MM_H__ - -#include -#include -#include -#include -#include -#include -#include - -/* - * Per-page-frame information. - * - * Every architecture must ensure the following: - * 1. 'struct page_info' contains a 'struct page_list_entry list'. - * 2. Provide a PFN_ORDER() macro for accessing the order of a free page. - */ -#define PFN_ORDER(_pfn) ((_pfn)->v.free.order) - -#define PG_shift(idx) (BITS_PER_LONG - (idx)) -#define PG_mask(x, idx) (x ## UL << PG_shift(idx)) - - /* The following page types are MUTUALLY EXCLUSIVE. */ -#define PGT_none PG_mask(0, 3) /* no special uses of this page */ -#define PGT_l1_page_table PG_mask(1, 3) /* using as an L1 page table? */ -#define PGT_l2_page_table PG_mask(2, 3) /* using as an L2 page table? */ -#define PGT_l3_page_table PG_mask(3, 3) /* using as an L3 page table? */ -#define PGT_l4_page_table PG_mask(4, 3) /* using as an L4 page table? */ -#define PGT_seg_desc_page PG_mask(5, 3) /* using this page in a GDT/LDT? */ -#define PGT_shared_page PG_mask(6, 3) /* CoW sharable page */ -#define PGT_writable_page PG_mask(7, 3) /* has writable mappings? */ -#define PGT_type_mask PG_mask(7, 3) /* Bits 61-63. */ - - /* Page is locked? */ -#define _PGT_locked PG_shift(4) -#define PGT_locked PG_mask(1, 4) - /* Owning guest has pinned this page to its current type? */ -#define _PGT_pinned PG_shift(5) -#define PGT_pinned PG_mask(1, 5) - /* Has this page been validated for use as its current type? */ -#define _PGT_validated PG_shift(6) -#define PGT_validated PG_mask(1, 6) - /* PAE only: is this an L2 page directory containing Xen-private mappings? */ -#ifdef CONFIG_PV32 -#define _PGT_pae_xen_l2 PG_shift(7) -#define PGT_pae_xen_l2 PG_mask(1, 7) -#else -#define PGT_pae_xen_l2 0 -#endif -/* Has this page been *partially* validated for use as its current type? */ -#define _PGT_partial PG_shift(8) -#define PGT_partial PG_mask(1, 8) - - /* Count of uses of this frame as its current type. */ -#define PGT_count_width PG_shift(8) -#define PGT_count_mask ((1UL<count_info&PGC_state) == PGC_state_##st) -/* Page is not reference counted (see below for caveats) */ -#define _PGC_extra PG_shift(10) -#define PGC_extra PG_mask(1, 10) - -/* Count of references to this frame. */ -#define PGC_count_width PG_shift(10) -#define PGC_count_mask ((1UL<count_info & PGC_xen_heap) -#define is_xen_heap_mfn(mfn) \ - (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn))) -#define is_xen_fixed_mfn(mfn) \ - (((mfn_to_maddr(mfn)) >= __pa(_stext)) && \ - ((mfn_to_maddr(mfn)) <= __pa(__2M_rwdata_end - 1))) - -#define PRtype_info "016lx"/* should only be used for printk's */ - -/* The number of out-of-sync shadows we allow per vcpu (prime, please) */ -#define SHADOW_OOS_PAGES 3 - -/* OOS fixup entries */ -#define SHADOW_OOS_FIXUPS 2 - -#define page_get_owner(_p) \ - ((struct domain *)((_p)->v.inuse._domain ? \ - pdx_to_virt((_p)->v.inuse._domain) : NULL)) -#define page_set_owner(_p,_d) \ - ((_p)->v.inuse._domain = (_d) ? 
virt_to_pdx(_d) : 0) - -#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma)))) - -#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START) -extern unsigned long max_page; -extern unsigned long total_pages; -void init_frametable(void); - -#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT - -/* Convert between Xen-heap virtual addresses and page-info structures. */ -static inline struct page_info *__virt_to_page(const void *v) -{ - unsigned long va = (unsigned long)v; - - ASSERT(va >= XEN_VIRT_START); - ASSERT(va < DIRECTMAP_VIRT_END); - if ( va < XEN_VIRT_END ) - va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start; - else - ASSERT(va >= DIRECTMAP_VIRT_START); - return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT); -} - -static inline void *__page_to_virt(const struct page_info *pg) -{ - ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE); - /* - * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The - * division and re-multiplication avoids one shift when sizeof(*pg) is a - * power of two (otherwise there would be a right shift followed by a - * left shift, which the compiler can't know it can fold into one). - */ - return (void *)(DIRECTMAP_VIRT_START + - ((unsigned long)pg - FRAMETABLE_VIRT_START) / - (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) * - (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg)))); -} - -int devalidate_page(struct page_info *page, unsigned long type, - int preemptible); - -void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d); -void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn, - const struct domain *d, mfn_t sl4mfn, bool ro_mpt); -bool fill_ro_mpt(mfn_t mfn); -void zap_ro_mpt(mfn_t mfn); - -bool is_iomem_page(mfn_t mfn); - -/* - * Pages with no owner which may get passed to functions wanting to - * refcount them can be marked PGC_extra to bypass this refcounting (which - * would fail due to the lack of an owner). - * - * (For pages with owner PGC_extra has different meaning.) - */ -static inline void page_suppress_refcounting(struct page_info *pg) -{ - ASSERT(!page_get_owner(pg)); - pg->count_info |= PGC_extra; -} - -static inline bool page_refcounting_suppressed(const struct page_info *pg) -{ - return !page_get_owner(pg) && (pg->count_info & PGC_extra); -} - -struct platform_bad_page { - unsigned long mfn; - unsigned int order; -}; - -const struct platform_bad_page *get_platform_badpages(unsigned int *array_size); - -/* Per page locks: - * page_lock() is used for pte serialization. - * - * All users of page lock for pte serialization live in mm.c, use it - * to lock a page table page during pte updates, do not take other locks within - * the critical section delimited by page_lock/unlock, and perform no - * nesting. - * - * The use of PGT_locked in mem_sharing does not collide, since mem_sharing is - * only supported for hvm guests, which do not have PV PTEs updated. 
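A minimal sketch of the locking discipline described above (hypothetical PTE-update fragment; the real users live in mm.c):

    /* Illustrative only: serialize a single l1e update against other users. */
    if ( !page_lock(pg) )
        return -EBUSY; /* page type changed under our feet */

    /* ... perform the one pte update here; take no other locks ... */

    page_unlock(pg);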
- */
-int page_lock(struct page_info *page);
-void page_unlock(struct page_info *page);
-
-void put_page_type(struct page_info *page);
-int get_page_type(struct page_info *page, unsigned long type);
-int put_page_type_preemptible(struct page_info *page);
-int get_page_type_preemptible(struct page_info *page, unsigned long type);
-int put_old_guest_table(struct vcpu *);
-int get_page_from_l1e(
-    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
-void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
-
-static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
-{
-    struct page_info *page = mfn_to_page(mfn);
-
-    if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
-    {
-        gdprintk(XENLOG_WARNING,
-                 "Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
-        return NULL;
-    }
-
-    return page;
-}
-
-static inline void put_page_and_type(struct page_info *page)
-{
-    put_page_type(page);
-    put_page(page);
-}
-
-static inline int put_page_and_type_preemptible(struct page_info *page)
-{
-    int rc = put_page_type_preemptible(page);
-
-    if ( likely(rc == 0) )
-        put_page(page);
-    return rc;
-}
-
-static inline int get_page_and_type(struct page_info *page,
-                                    struct domain *domain,
-                                    unsigned long type)
-{
-    int rc = get_page(page, domain);
-
-    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
-    {
-        put_page(page);
-        rc = 0;
-    }
-
-    return rc;
-}
-
-#define ASSERT_PAGE_IS_TYPE(_p, _t) \
-    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
-    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
-#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
-    ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
-    ASSERT(page_get_owner(_p) == (_d))
-
-extern paddr_t mem_hotplug;
-
-/******************************************************************************
- * With shadow pagetables, the different kinds of address start
- * to get confusing.
- *
- * Virtual addresses are what they usually are: the addresses that are used
- * to access memory while the guest is running. The MMU translates from
- * virtual addresses to machine addresses.
- *
- * (Pseudo-)physical addresses are the abstraction of physical memory the
- * guest uses for allocation and so forth. For the purposes of this code,
- * we can largely ignore them.
- *
- * Guest frame numbers (gfns) are the entries that the guest puts in its
- * pagetables. For normal paravirtual guests, they are actual frame numbers,
- * with the translation done by the guest.
- *
- * Machine frame numbers (mfns) are the entries that the hypervisor puts
- * in the shadow page tables.
- *
- * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
- * to a "machine frame number, from the guest's perspective", or in other
- * words, pseudo-physical frame numbers. However, in the shadow code, the
- * term "gmfn" means "the mfn of a guest page"; this combines naturally with
- * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
- * guest L2 page), etc...
- */
-
-/*
- * The MPT (machine->physical mapping table) is an array of word-sized
- * values, indexed on machine frame number. It is expected that guest OSes
- * will use it to store a "physical" frame number to give the appearance of
- * contiguous (or near contiguous) physical memory.
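A hedged sketch of M2P lookup using the accessors defined just below (hypothetical helper; note shared entries also fail the VALID_M2P() top-bit test, so they are filtered explicitly, and machine_to_phys_mapping must already be set up):

    /* Illustrative only: is this MFN backed by a normal guest mapping? */
    static bool mfn_has_valid_gpfn(unsigned long mfn)
    {
        unsigned long pfn = get_gpfn_from_mfn(mfn);

        return !SHARED_M2P(pfn) && VALID_M2P(pfn);
    }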
- */ -#undef machine_to_phys_mapping -#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START) -#define INVALID_M2P_ENTRY (~0UL) -#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1)))) -#define SHARED_M2P_ENTRY (~0UL - 1UL) -#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY) - -/* - * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until - * the machine_to_phys_mapping is actually set up. - */ -extern bool machine_to_phys_mapping_valid; - -void set_gpfn_from_mfn(unsigned long mfn, unsigned long pfn); - -extern struct rangeset *mmio_ro_ranges; - -#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)]) - -#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) -#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) - -#ifdef MEMORY_GUARD -void memguard_guard_range(void *p, unsigned long l); -void memguard_unguard_range(void *p, unsigned long l); -#else -#define memguard_guard_range(_p,_l) ((void)0) -#define memguard_unguard_range(_p,_l) ((void)0) -#endif - -void memguard_guard_stack(void *p); -void memguard_unguard_stack(void *p); - -struct mmio_ro_emulate_ctxt { - unsigned long cr2; - unsigned int seg, bdf; -}; - -extern int mmio_ro_emulated_write(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt); -extern int mmcfg_intercept_write(enum x86_segment seg, - unsigned long offset, - void *p_data, - unsigned int bytes, - struct x86_emulate_ctxt *ctxt); - -int audit_adjust_pgtables(struct domain *d, int dir, int noisy); - -extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs); -extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs); - -#ifndef NDEBUG - -#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 ) -#define AUDIT_ERRORS_OK ( 1u << 1 ) -#define AUDIT_QUIET ( 1u << 2 ) - -void _audit_domain(struct domain *d, int flags); -#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK) -void audit_domains(void); - -#else - -#define _audit_domain(_d, _f) ((void)0) -#define audit_domain(_d) ((void)0) -#define audit_domains() ((void)0) - -#endif - -void make_cr3(struct vcpu *v, mfn_t mfn); -void update_cr3(struct vcpu *v); -int vcpu_destroy_pagetables(struct vcpu *); -void *do_page_walk(struct vcpu *v, unsigned long addr); - -/* Allocator functions for Xen pagetables. */ -mfn_t alloc_xen_pagetable(void); -void free_xen_pagetable(mfn_t mfn); -void *alloc_mapped_pagetable(mfn_t *pmfn); - -l1_pgentry_t *virt_to_xen_l1e(unsigned long v); - -int __sync_local_execstate(void); - -/* Arch-specific portion of memory_op hypercall. 
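A minimal sketch of the pagetable-allocator pairing declared above (assuming, as its callers do, that alloc_xen_pagetable() signals failure with INVALID_MFN):

    /* Illustrative only: allocate, use and release one Xen pagetable page. */
    mfn_t mfn = alloc_xen_pagetable();

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ENOMEM;
    /* ... map and populate the table ... */
    free_xen_pagetable(mfn);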
*/ -long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg); -long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg); -int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void)); -int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void)); - -#define NIL(type) ((type *)-sizeof(type)) -#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr)))) - -int create_perdomain_mapping(struct domain *, unsigned long va, - unsigned int nr, l1_pgentry_t **, - struct page_info **); -void destroy_perdomain_mapping(struct domain *, unsigned long va, - unsigned int nr); -void free_perdomain_mappings(struct domain *); - -extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm); - -void domain_set_alloc_bitsize(struct domain *d); -unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits); - -unsigned long domain_get_maximum_gpfn(struct domain *d); - -/* Definition of an mm lock: spinlock with extra fields for debugging */ -typedef struct mm_lock { - spinlock_t lock; - int unlock_level; - int locker; /* processor which holds the lock */ - const char *locker_function; /* func that took it */ -} mm_lock_t; - -typedef struct mm_rwlock { - percpu_rwlock_t lock; - int unlock_level; - int recurse_count; - int locker; /* CPU that holds the write lock */ - const char *locker_function; /* func that took it */ -} mm_rwlock_t; - -#define arch_free_heap_page(d, pg) \ - page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list) - -extern const char zero_page[]; - -/* Build a 32bit PSE page table using 4MB pages. */ -void write_32bit_pse_identmap(uint32_t *l2); - -/* - * x86 maps part of physical memory via the directmap region. - * Return whether the input MFN falls in that range. - */ -static inline bool arch_mfn_in_directmap(unsigned long mfn) -{ - unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END); - - return mfn <= (virt_to_mfn(eva - 1) + 1); -} - -#endif /* __ASM_X86_MM_H__ */ diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h deleted file mode 100644 index 01c6d63bb9..0000000000 --- a/xen/include/asm-x86/monitor.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * include/asm-x86/monitor.h - * - * Arch-specific monitor_op domctl handler. - * - * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) - * Copyright (c) 2016, Bitdefender S.R.L. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . 
- */ - -#ifndef __ASM_X86_MONITOR_H__ -#define __ASM_X86_MONITOR_H__ - -#include - -#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index)) - -struct monitor_msr_bitmap { - DECLARE_BITMAP(low, 8192); - DECLARE_BITMAP(hypervisor, 8192); - DECLARE_BITMAP(high, 8192); -}; - -static inline -void arch_monitor_allow_userspace(struct domain *d, bool allow_userspace) -{ - d->arch.monitor.guest_request_userspace_enabled = allow_userspace; -} - -static inline -int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop) -{ - int rc = 0; - - switch ( mop->op ) - { - case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP: - domain_pause(d); - /* - * Enabling mem_access_emulate_each_rep without a vm_event subscriber - * is meaningless. - */ - if ( d->max_vcpus && d->vcpu[0] && d->vcpu[0]->arch.vm_event ) - d->arch.mem_access_emulate_each_rep = !!mop->event; - else - rc = -EINVAL; - - domain_unpause(d); - break; - - case XEN_DOMCTL_MONITOR_OP_CONTROL_REGISTERS: - d->arch.monitor.control_register_values = true; - break; - - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - -static inline uint32_t arch_monitor_get_capabilities(struct domain *d) -{ - uint32_t capabilities = 0; - - /* - * At the moment only Intel and AMD HVM domains are supported. However, - * event delivery could be extended to PV domains. - */ - if ( !is_hvm_domain(d) ) - return capabilities; - - capabilities = ((1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) | - (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) | - (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) | - (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) | - (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) | - (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) | - (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) | - (1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED) | - (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT)); - - if ( hvm_is_singlestep_supported() ) - capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP); - - if ( hvm_has_set_descriptor_access_exiting() ) - capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS); - - return capabilities; -} - -int arch_monitor_domctl_event(struct domain *d, - struct xen_domctl_monitor_op *mop); - -#ifdef CONFIG_HVM - -int arch_monitor_init_domain(struct domain *d); - -void arch_monitor_cleanup_domain(struct domain *d); - -#else - -static inline int arch_monitor_init_domain(struct domain *d) -{ - return -EOPNOTSUPP; -} - -static inline void arch_monitor_cleanup_domain(struct domain *d) {} - -#endif - -bool monitored_msr(const struct domain *d, u32 msr); -bool monitored_msr_onchangeonly(const struct domain *d, u32 msr); - -#endif /* __ASM_X86_MONITOR_H__ */ diff --git a/xen/include/asm-x86/mpspec.h b/xen/include/asm-x86/mpspec.h deleted file mode 100644 index 1246eece0b..0000000000 --- a/xen/include/asm-x86/mpspec.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef __ASM_MPSPEC_H -#define __ASM_MPSPEC_H - -#include -#include -#include - -extern unsigned char mp_bus_id_to_type[MAX_MP_BUSSES]; - -extern bool def_to_bigsmp; -extern unsigned int boot_cpu_physical_apicid; -extern bool smp_found_config; -extern void find_smp_config (void); -extern void get_smp_config (void); -extern unsigned char apic_version [MAX_APICS]; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; -extern int mpc_default_type; -extern unsigned long mp_lapic_addr; -extern bool pic_mode; - -#ifdef CONFIG_ACPI -extern int mp_register_lapic(u32 id, bool enabled, bool hotplug); -extern void mp_unregister_lapic(uint32_t apic_id, 
uint32_t cpu); -extern void mp_register_lapic_address (u64 address); -extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); -extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -extern void mp_config_acpi_legacy_irqs (void); -extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); -#endif /* CONFIG_ACPI */ - -#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) - -struct physid_mask -{ - unsigned long mask[PHYSID_ARRAY_SIZE]; -}; - -typedef struct physid_mask physid_mask_t; - -#define physid_set(physid, map) set_bit(physid, (map).mask) -#define physid_clear(physid, map) clear_bit(physid, (map).mask) -#define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) - -#define first_physid(map) find_first_bit((map).mask, \ - MAX_APICS) -#define next_physid(id, map) find_next_bit((map).mask, \ - MAX_APICS, (id) + 1) -#define last_physid(map) ({ \ - const unsigned long *mask = (map).mask; \ - unsigned int id, last = MAX_APICS; \ - for (id = find_first_bit(mask, MAX_APICS); id < MAX_APICS; \ - id = find_next_bit(mask, MAX_APICS, (id) + 1)) \ - last = id; \ - last; \ -}) - -#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) -#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) - -#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } -#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } - -extern physid_mask_t phys_cpu_present_map; - -#endif - diff --git a/xen/include/asm-x86/mpspec_def.h b/xen/include/asm-x86/mpspec_def.h deleted file mode 100644 index b17ec41426..0000000000 --- a/xen/include/asm-x86/mpspec_def.h +++ /dev/null @@ -1,188 +0,0 @@ -#ifndef __ASM_MPSPEC_DEF_H -#define __ASM_MPSPEC_DEF_H - -/* - * Structure definitions for SMP machines following the - * Intel Multiprocessing Specification 1.1 and 1.4. - */ - -/* - * This tag identifies where the SMP configuration - * information is. - */ - -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') - -#define MAX_MPC_ENTRY 1024 -#define MAX_APICS MAX(256, 4 * NR_CPUS) - -struct intel_mp_floating -{ - char mpf_signature[4]; /* "_MP_" */ - unsigned int mpf_physptr; /* Configuration table address */ - unsigned char mpf_length; /* Our length (paragraphs) */ - unsigned char mpf_specification;/* Specification version */ - unsigned char mpf_checksum; /* Checksum (makes sum 0) */ - unsigned char mpf_feature1; /* Standard or configuration ? 
*/ - unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ - unsigned char mpf_feature3; /* Unused (0) */ - unsigned char mpf_feature4; /* Unused (0) */ - unsigned char mpf_feature5; /* Unused (0) */ -}; - -struct mp_config_table -{ - char mpc_signature[4]; -#define MPC_SIGNATURE "PCMP" - unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; - unsigned int mpc_oemptr; /* 0 if not present */ - unsigned short mpc_oemsize; /* 0 if not present */ - unsigned short mpc_oemcount; - unsigned int mpc_lapic; /* APIC address */ - unsigned int reserved; -}; - -/* Followed by entries */ - -#define MP_PROCESSOR 0 -#define MP_BUS 1 -#define MP_IOAPIC 2 -#define MP_INTSRC 3 -#define MP_LINTSRC 4 -#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ - -struct mpc_config_processor -{ - unsigned char mpc_type; - unsigned char mpc_apicid; /* Local APIC number */ - unsigned char mpc_apicver; /* Its versions */ - unsigned char mpc_cpuflag; -#define CPU_ENABLED 1 /* Processor is available */ -#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ - unsigned int mpc_cpufeature; -#define CPU_STEPPING_MASK 0x0F -#define CPU_MODEL_MASK 0xF0 -#define CPU_FAMILY_MASK 0xF00 - unsigned int mpc_featureflag; /* CPUID feature value */ - unsigned int mpc_reserved[2]; -}; - -struct mpc_config_bus -{ - unsigned char mpc_type; - unsigned char mpc_busid; - unsigned char mpc_bustype[6]; -}; - -/* List of Bus Type string values, Intel MP Spec. */ -#define BUSTYPE_EISA "EISA" -#define BUSTYPE_ISA "ISA" -#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ -#define BUSTYPE_MCA "MCA" -#define BUSTYPE_VL "VL" /* Local bus */ -#define BUSTYPE_PCI "PCI" -#define BUSTYPE_PCMCIA "PCMCIA" -#define BUSTYPE_CBUS "CBUS" -#define BUSTYPE_CBUSII "CBUSII" -#define BUSTYPE_FUTURE "FUTURE" -#define BUSTYPE_MBI "MBI" -#define BUSTYPE_MBII "MBII" -#define BUSTYPE_MPI "MPI" -#define BUSTYPE_MPSA "MPSA" -#define BUSTYPE_NUBUS "NUBUS" -#define BUSTYPE_TC "TC" -#define BUSTYPE_VME "VME" -#define BUSTYPE_XPRESS "XPRESS" -#define BUSTYPE_NEC98 "NEC98" - -struct mpc_config_ioapic -{ - unsigned char mpc_type; - unsigned char mpc_apicid; - unsigned char mpc_apicver; - unsigned char mpc_flags; -#define MPC_APIC_USABLE 0x01 - unsigned int mpc_apicaddr; -}; - -struct mpc_config_intsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbus; - unsigned char mpc_srcbusirq; - unsigned char mpc_dstapic; - unsigned char mpc_dstirq; -}; - -enum mp_irq_source_types { - mp_INT = 0, - mp_NMI = 1, - mp_SMI = 2, - mp_ExtINT = 3 -}; - -#define MP_IRQDIR_DEFAULT 0 -#define MP_IRQDIR_HIGH 1 -#define MP_IRQDIR_LOW 3 - - -struct mpc_config_lintsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbusid; - unsigned char mpc_srcbusirq; - unsigned char mpc_destapic; -#define MP_APIC_ALL 0xFF - unsigned char mpc_destapiclint; -}; - -struct mp_config_oemtable -{ - char oem_signature[4]; -#define MPC_OEM_SIGNATURE "_OEM" - unsigned short oem_length; /* Size of table */ - char oem_rev; /* 0x01 */ - char oem_checksum; - char mpc_oem[8]; -}; - -struct mpc_config_translation -{ - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - -/* - * Default configurations - * - * 1 2 CPU ISA 82489DX - * 2 2 CPU EISA 
82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining - * 3 2 CPU EISA 82489DX - * 4 2 CPU MCA 82489DX - * 5 2 CPU ISA+PCI - * 6 2 CPU EISA+PCI - * 7 2 CPU MCA+PCI - */ - -enum mp_bustype { - MP_BUS_ISA = 1, - MP_BUS_EISA, - MP_BUS_PCI, - MP_BUS_MCA, - MP_BUS_NEC98 -}; -#endif - diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h deleted file mode 100644 index e228b0f3f3..0000000000 --- a/xen/include/asm-x86/msi.h +++ /dev/null @@ -1,256 +0,0 @@ -#ifndef __ASM_MSI_H -#define __ASM_MSI_H - -#include -#include -#include -#include - -/* - * Constants for Intel APIC based MSI messages. - */ - -/* - * Shifts for MSI data - */ - -#define MSI_DATA_VECTOR_SHIFT 0 -#define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) - -#define MSI_DATA_DELIVERY_MODE_SHIFT 8 -#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) -#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) -#define MSI_DATA_DELIVERY_MODE_MASK 0x00000700 - -#define MSI_DATA_LEVEL_SHIFT 14 -#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) -#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) - -#define MSI_DATA_TRIGGER_SHIFT 15 -#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) -#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) -#define MSI_DATA_TRIGGER_MASK 0x00008000 - -/* - * Shift/mask fields for msi address - */ - -#define MSI_ADDR_BASE_HI 0 -#define MSI_ADDR_BASE_LO 0xfee00000 -#define MSI_ADDR_BASE_MASK (~0xfffff) -#define MSI_ADDR_HEADER MSI_ADDR_BASE_LO - -#define MSI_ADDR_DESTMODE_SHIFT 2 -#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) -#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) -#define MSI_ADDR_DESTMODE_MASK 0x4 - -#define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) -#define MSI_ADDR_REDIRECTION_MASK (1 << MSI_ADDR_REDIRECTION_SHIFT) - -#define MSI_ADDR_DEST_ID_SHIFT 12 -#define MSI_ADDR_DEST_ID_MASK 0x00ff000 -#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) - -/* MAX fixed pages reserved for mapping MSIX tables. 
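To make the address fields above concrete, a hedged sketch composing the low 32 bits of a physical-mode MSI address (a simplified echo of what msi_compose_msg() does; apic_id is an assumed variable holding the target CPU's APIC ID):

    /* Illustrative only: fixed physical destination, no redirection hint. */
    uint32_t addr_lo = MSI_ADDR_HEADER
                       | MSI_ADDR_DESTMODE_PHYS
                       | MSI_ADDR_REDIRECTION_CPU
                       | MSI_ADDR_DEST_ID(apic_id);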
*/ -#define FIX_MSIX_MAX_PAGES 512 - -struct msi_info { - u16 seg; - u8 bus; - u8 devfn; - int irq; - int entry_nr; - uint64_t table_base; -}; - -struct msi_msg { - union { - u64 address; /* message address */ - struct { - u32 address_lo; /* message address low 32 bits */ - u32 address_hi; /* message address high 32 bits */ - }; - }; - u32 data; /* 16 bits of msi message data */ - u32 dest32; /* used when Interrupt Remapping with EIM is enabled */ -}; - -struct irq_desc; -struct hw_interrupt_type; -struct msi_desc; -/* Helper functions */ -extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc); -extern void pci_disable_msi(struct msi_desc *desc); -extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off); -extern void pci_cleanup_msi(struct pci_dev *pdev); -extern int setup_msi_irq(struct irq_desc *, struct msi_desc *); -extern int __setup_msi_irq(struct irq_desc *, struct msi_desc *, - const struct hw_interrupt_type *); -extern void teardown_msi_irq(int irq); -extern int msi_free_vector(struct msi_desc *entry); -extern int pci_restore_msi_state(struct pci_dev *pdev); -extern int pci_reset_msix_state(struct pci_dev *pdev); - -struct msi_desc { - struct msi_attrib { - __u8 type; /* {0: unused, 5h:MSI, 11h:MSI-X} */ - __u8 pos; /* Location of the MSI capability */ - __u8 maskbit : 1; /* mask/pending bit supported ? */ - __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ - __u8 host_masked : 1; - __u8 guest_masked : 1; - __u16 entry_nr; /* specific enabled entry */ - } msi_attrib; - - bool irte_initialized; - uint8_t gvec; /* guest vector. valid when pi_desc isn't NULL */ - const struct pi_desc *pi_desc; /* pointer to posted descriptor */ - - struct list_head list; - - union { - void __iomem *mask_base;/* va for the entry in mask table */ - struct { - unsigned int nvec;/* number of vectors */ - unsigned int mpos;/* location of mask register */ - } msi; - unsigned int hpet_id; /* HPET (dev is NULL) */ - }; - struct pci_dev *dev; - int irq; - int remap_index; /* index in interrupt remapping table */ - - struct msi_msg msg; /* Last set MSI message */ -}; - -/* - * Values stored into msi_desc.msi_attrib.pos for non-PCI devices - * (msi_desc.msi_attrib.type is zero): - */ -#define MSI_TYPE_UNKNOWN 0 -#define MSI_TYPE_HPET 1 -#define MSI_TYPE_IOMMU 2 - -int msi_maskable_irq(const struct msi_desc *); -int msi_free_irq(struct msi_desc *entry); - -/* - * Assume the maximum number of hot plug slots supported by the system is about - * ten. The worstcase is that each of these slots is hot-added with a device, - * which has two MSI/MSI-X capable functions. To avoid any MSI-X driver, which - * attempts to request all available vectors, NR_HP_RESERVED_VECTORS is defined - * as below to ensure at least one message is assigned to each detected MSI/ - * MSI-X device function. - */ -#define NR_HP_RESERVED_VECTORS 20 - -#define msi_control_reg(base) (base + PCI_MSI_FLAGS) -#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) -#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) -#define msi_data_reg(base, is64bit) \ - ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) -#define msi_mask_bits_reg(base, is64bit) \ - ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) -#define msi_pending_bits_reg(base, is64bit) \ - ((base) + PCI_MSI_MASK_BIT + ((is64bit) ? 
4 : 0)) -#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE -#define multi_msi_capable(control) \ - (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1)) -#define multi_msi_enable(control, num) \ - control |= (((fls(num) - 1) << 4) & PCI_MSI_FLAGS_QSIZE); -#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) -#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) -#define msi_enable(control, num) multi_msi_enable(control, num); \ - control |= PCI_MSI_FLAGS_ENABLE - -#define msix_control_reg(base) (base + PCI_MSIX_FLAGS) -#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) -#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) -#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE -#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE -#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) -#define msix_unmask(address) (address & ~PCI_MSIX_VECTOR_BITMASK) -#define msix_mask(address) (address | PCI_MSIX_VECTOR_BITMASK) - -/* - * MSI Defined Data Structures - */ - -struct __packed msg_data { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u32 vector : 8; - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ - __u32 reserved_1 : 3; - __u32 level : 1; /* 0: deassert | 1: assert */ - __u32 trigger : 1; /* 0: edge | 1: level */ - __u32 reserved_2 : 16; -#elif defined(__BIG_ENDIAN_BITFIELD) - __u32 reserved_2 : 16; - __u32 trigger : 1; /* 0: edge | 1: level */ - __u32 level : 1; /* 0: deassert | 1: assert */ - __u32 reserved_1 : 3; - __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ - __u32 vector : 8; -#else -#error "Bitfield endianness not defined! Check your byteorder.h" -#endif -}; - -struct __packed msg_address { - union { - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u32 reserved_1 : 2; - __u32 dest_mode : 1; /*0:physic | 1:logic */ - __u32 redirection_hint: 1; /*0: dedicated CPU - 1: lowest priority */ - __u32 reserved_2 : 4; - __u32 dest_id : 24; /* Destination ID */ -#elif defined(__BIG_ENDIAN_BITFIELD) - __u32 dest_id : 24; /* Destination ID */ - __u32 reserved_2 : 4; - __u32 redirection_hint: 1; /*0: dedicated CPU - 1: lowest priority */ - __u32 dest_mode : 1; /*0:physic | 1:logic */ - __u32 reserved_1 : 2; -#else -#error "Bitfield endianness not defined! 
Check your byteorder.h" -#endif - }u; - __u32 value; - }lo_address; - __u32 hi_address; -}; - -#define MAX_MSIX_TABLE_ENTRIES (PCI_MSIX_FLAGS_QSIZE + 1) -#define MAX_MSIX_TABLE_PAGES PFN_UP(MAX_MSIX_TABLE_ENTRIES * \ - PCI_MSIX_ENTRY_SIZE + \ - (~PCI_MSIX_BIRMASK & (PAGE_SIZE - 1))) - -struct arch_msix { - unsigned int nr_entries, used_entries; - struct { - unsigned long first, last; - } table, pba; - int table_refcnt[MAX_MSIX_TABLE_PAGES]; - int table_idx[MAX_MSIX_TABLE_PAGES]; - spinlock_t table_lock; - bool host_maskall, guest_maskall; - domid_t warned; -}; - -void early_msi_init(void); -void msi_compose_msg(unsigned vector, const cpumask_t *mask, - struct msi_msg *msg); -void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable); -void mask_msi_irq(struct irq_desc *); -void unmask_msi_irq(struct irq_desc *); -void guest_mask_msi_irq(struct irq_desc *, bool mask); -void ack_nonmaskable_msi_irq(struct irq_desc *); -void set_msi_affinity(struct irq_desc *, const cpumask_t *); - -#endif /* __ASM_MSI_H */ diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h deleted file mode 100644 index ab68ef2681..0000000000 --- a/xen/include/asm-x86/msr-index.h +++ /dev/null @@ -1,671 +0,0 @@ -#ifndef __ASM_MSR_INDEX_H -#define __ASM_MSR_INDEX_H - -/* - * CPU model specific register (MSR) numbers - * - * Definitions for an MSR should follow this style: - * - * #define MSR_$NAME 0x$INDEX - * #define $NAME_$FIELD1 (_AC($X, ULL) << $POS1) - * #define $NAME_$FIELD2 (_AC($Y, ULL) << $POS2) - * - * Blocks of related constants should be sorted by MSR index. The constant - * names should be as concise as possible, and the bit names may have an - * abbreviated name. Exceptions will be considered on a case-by-case basis. - */ - -#define MSR_APIC_BASE 0x0000001b -#define APIC_BASE_BSP (_AC(1, ULL) << 8) -#define APIC_BASE_EXTD (_AC(1, ULL) << 10) -#define APIC_BASE_ENABLE (_AC(1, ULL) << 11) -#define APIC_BASE_ADDR_MASK 0x000ffffffffff000ULL - -#define MSR_TEST_CTRL 0x00000033 -#define TEST_CTRL_SPLITLOCK_DETECT (_AC(1, ULL) << 29) -#define TEST_CTRL_SPLITLOCK_DISABLE (_AC(1, ULL) << 31) - -#define MSR_INTEL_CORE_THREAD_COUNT 0x00000035 -#define MSR_CTC_THREAD_MASK 0x0000ffff -#define MSR_CTC_CORE_MASK 0xffff0000 - -#define MSR_SPEC_CTRL 0x00000048 -#define SPEC_CTRL_IBRS (_AC(1, ULL) << 0) -#define SPEC_CTRL_STIBP (_AC(1, ULL) << 1) -#define SPEC_CTRL_SSBD (_AC(1, ULL) << 2) -#define SPEC_CTRL_PSFD (_AC(1, ULL) << 7) - -#define MSR_PRED_CMD 0x00000049 -#define PRED_CMD_IBPB (_AC(1, ULL) << 0) - -#define MSR_PPIN_CTL 0x0000004e -#define PPIN_LOCKOUT (_AC(1, ULL) << 0) -#define PPIN_ENABLE (_AC(1, ULL) << 1) -#define MSR_PPIN 0x0000004f - -#define MSR_CORE_CAPABILITIES 0x000000cf -#define CORE_CAPS_SPLITLOCK_DETECT (_AC(1, ULL) << 5) - -#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 -#define NHM_C3_AUTO_DEMOTE (_AC(1, ULL) << 25) -#define NHM_C1_AUTO_DEMOTE (_AC(1, ULL) << 26) -#define ATM_LNC_C6_AUTO_DEMOTE (_AC(1, ULL) << 25) -#define SNB_C3_AUTO_UNDEMOTE (_AC(1, ULL) << 27) -#define SNB_C1_AUTO_UNDEMOTE (_AC(1, ULL) << 28) - -#define MSR_ARCH_CAPABILITIES 0x0000010a -#define ARCH_CAPS_RDCL_NO (_AC(1, ULL) << 0) -#define ARCH_CAPS_IBRS_ALL (_AC(1, ULL) << 1) -#define ARCH_CAPS_RSBA (_AC(1, ULL) << 2) -#define ARCH_CAPS_SKIP_L1DFL (_AC(1, ULL) << 3) -#define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4) -#define ARCH_CAPS_MDS_NO (_AC(1, ULL) << 5) -#define ARCH_CAPS_IF_PSCHANGE_MC_NO (_AC(1, ULL) << 6) -#define ARCH_CAPS_TSX_CTRL (_AC(1, ULL) << 7) -#define ARCH_CAPS_TAA_NO (_AC(1, 
ULL) << 8) - -#define MSR_FLUSH_CMD 0x0000010b -#define FLUSH_CMD_L1D (_AC(1, ULL) << 0) - -#define MSR_TSX_FORCE_ABORT 0x0000010f -#define TSX_FORCE_ABORT_RTM (_AC(1, ULL) << 0) -#define TSX_CPUID_CLEAR (_AC(1, ULL) << 1) -#define TSX_ENABLE_RTM (_AC(1, ULL) << 2) - -#define MSR_TSX_CTRL 0x00000122 -#define TSX_CTRL_RTM_DISABLE (_AC(1, ULL) << 0) -#define TSX_CTRL_CPUID_CLEAR (_AC(1, ULL) << 1) - -#define MSR_MCU_OPT_CTRL 0x00000123 -#define MCU_OPT_CTRL_RNGDS_MITG_DIS (_AC(1, ULL) << 0) - -#define MSR_RTIT_OUTPUT_BASE 0x00000560 -#define MSR_RTIT_OUTPUT_MASK 0x00000561 -#define MSR_RTIT_CTL 0x00000570 -#define RTIT_CTL_TRACE_EN (_AC(1, ULL) << 0) -#define RTIT_CTL_CYC_EN (_AC(1, ULL) << 1) -#define RTIT_CTL_OS (_AC(1, ULL) << 2) -#define RTIT_CTL_USR (_AC(1, ULL) << 3) -#define RTIT_CTL_PWR_EVT_EN (_AC(1, ULL) << 4) -#define RTIT_CTL_FUP_ON_PTW (_AC(1, ULL) << 5) -#define RTIT_CTL_FABRIC_EN (_AC(1, ULL) << 6) -#define RTIT_CTL_CR3_FILTER (_AC(1, ULL) << 7) -#define RTIT_CTL_TOPA (_AC(1, ULL) << 8) -#define RTIT_CTL_MTC_EN (_AC(1, ULL) << 9) -#define RTIT_CTL_TSC_EN (_AC(1, ULL) << 10) -#define RTIT_CTL_DIS_RETC (_AC(1, ULL) << 11) -#define RTIT_CTL_PTW_EN (_AC(1, ULL) << 12) -#define RTIT_CTL_BRANCH_EN (_AC(1, ULL) << 13) -#define RTIT_CTL_MTC_FREQ (_AC(0xf, ULL) << 14) -#define RTIT_CTL_CYC_THRESH (_AC(0xf, ULL) << 19) -#define RTIT_CTL_PSB_FREQ (_AC(0xf, ULL) << 24) -#define RTIT_CTL_ADDR(n) (_AC(0xf, ULL) << (32 + 4 * (n))) -#define MSR_RTIT_STATUS 0x00000571 -#define RTIT_STATUS_FILTER_EN (_AC(1, ULL) << 0) -#define RTIT_STATUS_CONTEXT_EN (_AC(1, ULL) << 1) -#define RTIT_STATUS_TRIGGER_EN (_AC(1, ULL) << 2) -#define RTIT_STATUS_ERROR (_AC(1, ULL) << 4) -#define RTIT_STATUS_STOPPED (_AC(1, ULL) << 5) -#define RTIT_STATUS_BYTECNT (_AC(0x1ffff, ULL) << 32) -#define MSR_RTIT_CR3_MATCH 0x00000572 -#define MSR_RTIT_ADDR_A(n) (0x00000580 + (n) * 2) -#define MSR_RTIT_ADDR_B(n) (0x00000581 + (n) * 2) - -#define MSR_U_CET 0x000006a0 -#define MSR_S_CET 0x000006a2 -#define CET_SHSTK_EN (_AC(1, ULL) << 0) -#define CET_WRSS_EN (_AC(1, ULL) << 1) - -#define MSR_PL0_SSP 0x000006a4 -#define MSR_PL1_SSP 0x000006a5 -#define MSR_PL2_SSP 0x000006a6 -#define MSR_PL3_SSP 0x000006a7 -#define MSR_INTERRUPT_SSP_TABLE 0x000006a8 - -#define MSR_X2APIC_FIRST 0x00000800 -#define MSR_X2APIC_LAST 0x00000bff - -#define MSR_X2APIC_TPR 0x00000808 -#define MSR_X2APIC_PPR 0x0000080a -#define MSR_X2APIC_EOI 0x0000080b -#define MSR_X2APIC_TMICT 0x00000838 -#define MSR_X2APIC_TMCCT 0x00000839 -#define MSR_X2APIC_SELF 0x0000083f - -#define MSR_PASID 0x00000d93 -#define PASID_PASID_MASK 0x000fffff -#define PASID_VALID (_AC(1, ULL) << 31) - -#define MSR_EFER 0xc0000080 /* Extended Feature Enable Register */ -#define EFER_SCE (_AC(1, ULL) << 0) /* SYSCALL Enable */ -#define EFER_LME (_AC(1, ULL) << 8) /* Long Mode Enable */ -#define EFER_LMA (_AC(1, ULL) << 10) /* Long Mode Active */ -#define EFER_NXE (_AC(1, ULL) << 11) /* No Execute Enable */ -#define EFER_SVME (_AC(1, ULL) << 12) /* Secure Virtual Machine Enable */ -#define EFER_FFXSE (_AC(1, ULL) << 14) /* Fast FXSAVE/FXRSTOR */ - -#define EFER_KNOWN_MASK \ - (EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE | EFER_SVME | EFER_FFXSE) - -#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ -#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ -#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ -#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ -#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ -#define MSR_GS_BASE 0xc0000101 /* 64bit GS 
base */ -#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */ -#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ - -#define MSR_K8_SYSCFG 0xc0010010 -#define SYSCFG_MTRR_FIX_DRAM_EN (_AC(1, ULL) << 18) -#define SYSCFG_MTRR_FIX_DRAM_MOD_EN (_AC(1, ULL) << 19) -#define SYSCFG_MTRR_VAR_DRAM_EN (_AC(1, ULL) << 20) -#define SYSCFG_MTRR_TOM2_EN (_AC(1, ULL) << 21) -#define SYSCFG_TOM2_FORCE_WB (_AC(1, ULL) << 22) - -#define MSR_K8_IORR_BASE0 0xc0010016 -#define MSR_K8_IORR_MASK0 0xc0010017 -#define MSR_K8_IORR_BASE1 0xc0010018 -#define MSR_K8_IORR_MASK1 0xc0010019 - -#define MSR_K8_TSEG_BASE 0xc0010112 /* AMD doc: SMMAddr */ -#define MSR_K8_TSEG_MASK 0xc0010113 /* AMD doc: SMMMask */ - -#define MSR_K8_VM_CR 0xc0010114 -#define VM_CR_INIT_REDIRECTION (_AC(1, ULL) << 1) -#define VM_CR_SVM_DISABLE (_AC(1, ULL) << 4) - -#define MSR_VIRT_SPEC_CTRL 0xc001011f /* Layout matches MSR_SPEC_CTRL */ - -/* - * Legacy MSR constants in need of cleanup. No new MSRs below this comment. - */ - -/* Intel MSRs. Some also available on other CPUs */ -#define MSR_IA32_PERFCTR0 0x000000c1 -#define MSR_IA32_A_PERFCTR0 0x000004c1 -#define MSR_FSB_FREQ 0x000000cd - -#define MSR_MTRRcap 0x000000fe -#define MTRRcap_VCNT 0x000000ff - -#define MSR_IA32_BBL_CR_CTL 0x00000119 - -#define MSR_IA32_SYSENTER_CS 0x00000174 -#define MSR_IA32_SYSENTER_ESP 0x00000175 -#define MSR_IA32_SYSENTER_EIP 0x00000176 - -#define MSR_IA32_MCG_CAP 0x00000179 -#define MSR_IA32_MCG_STATUS 0x0000017a -#define MSR_IA32_MCG_CTL 0x0000017b -#define MSR_IA32_MCG_EXT_CTL 0x000004d0 - -#define MSR_IA32_PEBS_ENABLE 0x000003f1 -#define MSR_IA32_DS_AREA 0x00000600 -#define MSR_IA32_PERF_CAPABILITIES 0x00000345 -/* Lower 6 bits define the format of the address in the LBR stack */ -#define MSR_IA32_PERF_CAP_LBR_FORMAT 0x3f - -#define MSR_IA32_BNDCFGS 0x00000d90 -#define IA32_BNDCFGS_ENABLE 0x00000001 -#define IA32_BNDCFGS_PRESERVE 0x00000002 -#define IA32_BNDCFGS_RESERVED 0x00000ffc - -#define MSR_IA32_XSS 0x00000da0 - -#define MSR_MTRRfix64K_00000 0x00000250 -#define MSR_MTRRfix16K_80000 0x00000258 -#define MSR_MTRRfix16K_A0000 0x00000259 -#define MSR_MTRRfix4K_C0000 0x00000268 -#define MSR_MTRRfix4K_C8000 0x00000269 -#define MSR_MTRRfix4K_D0000 0x0000026a -#define MSR_MTRRfix4K_D8000 0x0000026b -#define MSR_MTRRfix4K_E0000 0x0000026c -#define MSR_MTRRfix4K_E8000 0x0000026d -#define MSR_MTRRfix4K_F0000 0x0000026e -#define MSR_MTRRfix4K_F8000 0x0000026f -#define MSR_MTRRdefType 0x000002ff -#define MTRRdefType_FE (1u << 10) -#define MTRRdefType_E (1u << 11) - -#define MSR_IA32_DEBUGCTLMSR 0x000001d9 -#define IA32_DEBUGCTLMSR_LBR (1<<0) /* Last Branch Record */ -#define IA32_DEBUGCTLMSR_BTF (1<<1) /* Single Step on Branches */ -#define IA32_DEBUGCTLMSR_TR (1<<6) /* Trace Message Enable */ -#define IA32_DEBUGCTLMSR_BTS (1<<7) /* Branch Trace Store */ -#define IA32_DEBUGCTLMSR_BTINT (1<<8) /* Branch Trace Interrupt */ -#define IA32_DEBUGCTLMSR_BTS_OFF_OS (1<<9) /* BTS off if CPL 0 */ -#define IA32_DEBUGCTLMSR_BTS_OFF_USR (1<<10) /* BTS off if CPL > 0 */ -#define IA32_DEBUGCTLMSR_RTM (1<<15) /* RTM debugging enable */ - -#define MSR_IA32_LASTBRANCHFROMIP 0x000001db -#define MSR_IA32_LASTBRANCHTOIP 0x000001dc -#define MSR_IA32_LASTINTFROMIP 0x000001dd -#define MSR_IA32_LASTINTTOIP 0x000001de - -#define MSR_IA32_POWER_CTL 0x000001fc - -#define MSR_IA32_MTRR_PHYSBASE(n) (0x00000200 + 2 * (n)) -#define MSR_IA32_MTRR_PHYSMASK(n) (0x00000201 + 2 * (n)) - -#define MSR_IA32_CR_PAT 0x00000277 -#define MSR_IA32_CR_PAT_RESET 0x0007040600070406ULL - -#define 
MSR_IA32_MC0_CTL 0x00000400 -#define MSR_IA32_MC0_STATUS 0x00000401 -#define MSR_IA32_MC0_ADDR 0x00000402 -#define MSR_IA32_MC0_MISC 0x00000403 -#define MSR_IA32_MC0_CTL2 0x00000280 -#define CMCI_EN (1UL<<30) -#define CMCI_THRESHOLD_MASK 0x7FFF - -#define MSR_AMD64_MC0_MASK 0xc0010044 - -#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) -#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) -#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) -#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) -#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) - -#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) - -/* MSRs & bits used for VMX enabling */ -#define MSR_IA32_VMX_BASIC 0x480 -#define MSR_IA32_VMX_PINBASED_CTLS 0x481 -#define MSR_IA32_VMX_PROCBASED_CTLS 0x482 -#define MSR_IA32_VMX_EXIT_CTLS 0x483 -#define MSR_IA32_VMX_ENTRY_CTLS 0x484 -#define MSR_IA32_VMX_MISC 0x485 -#define MSR_IA32_VMX_CR0_FIXED0 0x486 -#define MSR_IA32_VMX_CR0_FIXED1 0x487 -#define MSR_IA32_VMX_CR4_FIXED0 0x488 -#define MSR_IA32_VMX_CR4_FIXED1 0x489 -#define MSR_IA32_VMX_VMCS_ENUM 0x48a -#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b -#define MSR_IA32_VMX_EPT_VPID_CAP 0x48c -#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x48d -#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x48e -#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x48f -#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x490 -#define MSR_IA32_VMX_VMFUNC 0x491 - -/* K7/K8 MSRs. Not complete. See the architecture manual for a more - complete list. */ -#define MSR_K7_EVNTSEL0 0xc0010000 -#define MSR_K7_PERFCTR0 0xc0010004 -#define MSR_K7_EVNTSEL1 0xc0010001 -#define MSR_K7_PERFCTR1 0xc0010005 -#define MSR_K7_EVNTSEL2 0xc0010002 -#define MSR_K7_PERFCTR2 0xc0010006 -#define MSR_K7_EVNTSEL3 0xc0010003 -#define MSR_K7_PERFCTR3 0xc0010007 -#define MSR_K8_TOP_MEM1 0xc001001a -#define MSR_K7_CLK_CTL 0xc001001b -#define MSR_K8_TOP_MEM2 0xc001001d - -#define MSR_K8_HWCR 0xc0010015 -#define K8_HWCR_TSC_FREQ_SEL (1ULL << 24) - -#define MSR_K7_FID_VID_CTL 0xc0010041 -#define MSR_K7_FID_VID_STATUS 0xc0010042 -#define MSR_K8_PSTATE_LIMIT 0xc0010061 -#define MSR_K8_PSTATE_CTRL 0xc0010062 -#define MSR_K8_PSTATE_STATUS 0xc0010063 -#define MSR_K8_PSTATE0 0xc0010064 -#define MSR_K8_PSTATE1 0xc0010065 -#define MSR_K8_PSTATE2 0xc0010066 -#define MSR_K8_PSTATE3 0xc0010067 -#define MSR_K8_PSTATE4 0xc0010068 -#define MSR_K8_PSTATE5 0xc0010069 -#define MSR_K8_PSTATE6 0xc001006A -#define MSR_K8_PSTATE7 0xc001006B -#define MSR_K8_ENABLE_C1E 0xc0010055 -#define MSR_K8_VM_HSAVE_PA 0xc0010117 - -#define MSR_AMD_FAM15H_EVNTSEL0 0xc0010200 -#define MSR_AMD_FAM15H_PERFCTR0 0xc0010201 -#define MSR_AMD_FAM15H_EVNTSEL1 0xc0010202 -#define MSR_AMD_FAM15H_PERFCTR1 0xc0010203 -#define MSR_AMD_FAM15H_EVNTSEL2 0xc0010204 -#define MSR_AMD_FAM15H_PERFCTR2 0xc0010205 -#define MSR_AMD_FAM15H_EVNTSEL3 0xc0010206 -#define MSR_AMD_FAM15H_PERFCTR3 0xc0010207 -#define MSR_AMD_FAM15H_EVNTSEL4 0xc0010208 -#define MSR_AMD_FAM15H_PERFCTR4 0xc0010209 -#define MSR_AMD_FAM15H_EVNTSEL5 0xc001020a -#define MSR_AMD_FAM15H_PERFCTR5 0xc001020b - -#define MSR_AMD_L7S0_FEATURE_MASK 0xc0011002 -#define MSR_AMD_THRM_FEATURE_MASK 0xc0011003 -#define MSR_K8_FEATURE_MASK 0xc0011004 -#define MSR_K8_EXT_FEATURE_MASK 0xc0011005 - -/* AMD64 MSRs */ -#define MSR_AMD64_NB_CFG 0xc001001f -#define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46 -#define MSR_AMD64_LS_CFG 0xc0011020 -#define MSR_AMD64_IC_CFG 0xc0011021 -#define MSR_AMD64_DC_CFG 0xc0011022 -#define MSR_AMD64_DE_CFG 0xc0011029 -#define AMD64_DE_CFG_LFENCE_SERIALISE (_AC(1, ULL) << 1) -#define 
MSR_AMD64_EX_CFG 0xc001102c - -#define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027 -#define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019 -#define MSR_AMD64_DR2_ADDRESS_MASK 0xc001101a -#define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b - -/* AMD Family10h machine check MSRs */ -#define MSR_F10_MC4_MISC1 0xc0000408 -#define MSR_F10_MC4_MISC2 0xc0000409 -#define MSR_F10_MC4_MISC3 0xc000040A - -/* AMD Family10h Bus Unit MSRs */ -#define MSR_F10_BU_CFG 0xc0011023 -#define MSR_F10_BU_CFG2 0xc001102a - -/* Other AMD Fam10h MSRs */ -#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 -#define FAM10H_MMIO_CONF_ENABLE (1<<0) -#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf -#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 -#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL -#define FAM10H_MMIO_CONF_BASE_SHIFT 20 - -/* AMD Microcode MSRs */ -#define MSR_AMD_PATCHLEVEL 0x0000008b -#define MSR_AMD_PATCHLOADER 0xc0010020 - -/* AMD TSC RATE MSR */ -#define MSR_AMD64_TSC_RATIO 0xc0000104 - -/* AMD Lightweight Profiling MSRs */ -#define MSR_AMD64_LWP_CFG 0xc0000105 -#define MSR_AMD64_LWP_CBADDR 0xc0000106 - -/* AMD OS Visible Workaround MSRs */ -#define MSR_AMD_OSVW_ID_LENGTH 0xc0010140 -#define MSR_AMD_OSVW_STATUS 0xc0010141 - -/* AMD Protected Processor Inventory Number */ -#define MSR_AMD_PPIN_CTL 0xc00102f0 -#define MSR_AMD_PPIN 0xc00102f1 - -/* K6 MSRs */ -#define MSR_K6_EFER 0xc0000080 -#define MSR_K6_STAR 0xc0000081 -#define MSR_K6_WHCR 0xc0000082 -#define MSR_K6_UWCCR 0xc0000085 -#define MSR_K6_EPMR 0xc0000086 -#define MSR_K6_PSOR 0xc0000087 -#define MSR_K6_PFIR 0xc0000088 - -/* Centaur-Hauls/IDT defined MSRs. */ -#define MSR_IDT_FCR1 0x00000107 -#define MSR_IDT_FCR2 0x00000108 -#define MSR_IDT_FCR3 0x00000109 -#define MSR_IDT_FCR4 0x0000010a - -#define MSR_IDT_MCR0 0x00000110 -#define MSR_IDT_MCR1 0x00000111 -#define MSR_IDT_MCR2 0x00000112 -#define MSR_IDT_MCR3 0x00000113 -#define MSR_IDT_MCR4 0x00000114 -#define MSR_IDT_MCR5 0x00000115 -#define MSR_IDT_MCR6 0x00000116 -#define MSR_IDT_MCR7 0x00000117 -#define MSR_IDT_MCR_CTRL 0x00000120 - -/* VIA Cyrix defined MSRs*/ -#define MSR_VIA_FCR 0x00001107 -#define MSR_VIA_LONGHAUL 0x0000110a -#define MSR_VIA_RNG 0x0000110b -#define MSR_VIA_BCR2 0x00001147 - -/* Transmeta defined MSRs */ -#define MSR_TMTA_LONGRUN_CTRL 0x80868010 -#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 -#define MSR_TMTA_LRTI_READOUT 0x80868018 -#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a - -/* Intel defined MSRs. 
*/ -#define MSR_IA32_P5_MC_ADDR 0x00000000 -#define MSR_IA32_P5_MC_TYPE 0x00000001 -#define MSR_IA32_TSC 0x00000010 -#define MSR_IA32_PLATFORM_ID 0x00000017 -#define MSR_IA32_EBL_CR_POWERON 0x0000002a -#define MSR_IA32_EBC_FREQUENCY_ID 0x0000002c - -#define MSR_IA32_FEATURE_CONTROL 0x0000003a -#define IA32_FEATURE_CONTROL_LOCK 0x0001 -#define IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX 0x0002 -#define IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX 0x0004 -#define IA32_FEATURE_CONTROL_SENTER_PARAM_CTL 0x7f00 -#define IA32_FEATURE_CONTROL_ENABLE_SENTER 0x8000 -#define IA32_FEATURE_CONTROL_SGX_ENABLE 0x40000 -#define IA32_FEATURE_CONTROL_LMCE_ON 0x100000 - -#define MSR_IA32_TSC_ADJUST 0x0000003b - -#define MSR_IA32_UCODE_WRITE 0x00000079 -#define MSR_IA32_UCODE_REV 0x0000008b - -#define MSR_IA32_PERF_STATUS 0x00000198 -#define MSR_IA32_PERF_CTL 0x00000199 - -#define MSR_IA32_MPERF 0x000000e7 -#define MSR_IA32_APERF 0x000000e8 - -#define MSR_IA32_THERM_CONTROL 0x0000019a -#define MSR_IA32_THERM_INTERRUPT 0x0000019b -#define MSR_IA32_THERM_STATUS 0x0000019c -#define MSR_IA32_MISC_ENABLE 0x000001a0 -#define MSR_IA32_MISC_ENABLE_PERF_AVAIL (1<<7) -#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1<<11) -#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12) -#define MSR_IA32_MISC_ENABLE_MONITOR_ENABLE (1<<18) -#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1<<22) -#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1<<23) -#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34) - -#define MSR_IA32_TSC_DEADLINE 0x000006E0 -#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 - -/* Platform Shared Resource MSRs */ -#define MSR_IA32_CMT_EVTSEL 0x00000c8d -#define MSR_IA32_CMT_EVTSEL_UE_MASK 0x0000ffff -#define MSR_IA32_CMT_CTR 0x00000c8e -#define MSR_IA32_PSR_ASSOC 0x00000c8f -#define MSR_IA32_PSR_L3_QOS_CFG 0x00000c81 -#define MSR_IA32_PSR_L3_MASK(n) (0x00000c90 + (n)) -#define MSR_IA32_PSR_L3_MASK_CODE(n) (0x00000c90 + (n) * 2 + 1) -#define MSR_IA32_PSR_L3_MASK_DATA(n) (0x00000c90 + (n) * 2) -#define MSR_IA32_PSR_L2_MASK(n) (0x00000d10 + (n)) -#define MSR_IA32_PSR_MBA_MASK(n) (0x00000d50 + (n)) - -/* Intel Model 6 */ -#define MSR_P6_PERFCTR(n) (0x000000c1 + (n)) -#define MSR_P6_EVNTSEL(n) (0x00000186 + (n)) - -/* P4/Xeon+ specific */ -#define MSR_IA32_MCG_EAX 0x00000180 -#define MSR_IA32_MCG_EBX 0x00000181 -#define MSR_IA32_MCG_ECX 0x00000182 -#define MSR_IA32_MCG_EDX 0x00000183 -#define MSR_IA32_MCG_ESI 0x00000184 -#define MSR_IA32_MCG_EDI 0x00000185 -#define MSR_IA32_MCG_EBP 0x00000186 -#define MSR_IA32_MCG_ESP 0x00000187 -#define MSR_IA32_MCG_EFLAGS 0x00000188 -#define MSR_IA32_MCG_EIP 0x00000189 -#define MSR_IA32_MCG_MISC 0x0000018a -#define MSR_IA32_MCG_R8 0x00000190 -#define MSR_IA32_MCG_R9 0x00000191 -#define MSR_IA32_MCG_R10 0x00000192 -#define MSR_IA32_MCG_R11 0x00000193 -#define MSR_IA32_MCG_R12 0x00000194 -#define MSR_IA32_MCG_R13 0x00000195 -#define MSR_IA32_MCG_R14 0x00000196 -#define MSR_IA32_MCG_R15 0x00000197 - -/* Pentium IV performance counter MSRs */ -#define MSR_P4_BPU_PERFCTR0 0x00000300 -#define MSR_P4_BPU_PERFCTR1 0x00000301 -#define MSR_P4_BPU_PERFCTR2 0x00000302 -#define MSR_P4_BPU_PERFCTR3 0x00000303 -#define MSR_P4_MS_PERFCTR0 0x00000304 -#define MSR_P4_MS_PERFCTR1 0x00000305 -#define MSR_P4_MS_PERFCTR2 0x00000306 -#define MSR_P4_MS_PERFCTR3 0x00000307 -#define MSR_P4_FLAME_PERFCTR0 0x00000308 -#define MSR_P4_FLAME_PERFCTR1 0x00000309 -#define MSR_P4_FLAME_PERFCTR2 0x0000030a -#define MSR_P4_FLAME_PERFCTR3 0x0000030b -#define MSR_P4_IQ_PERFCTR0 0x0000030c -#define MSR_P4_IQ_PERFCTR1 0x0000030d -#define 
MSR_P4_IQ_PERFCTR2 0x0000030e -#define MSR_P4_IQ_PERFCTR3 0x0000030f -#define MSR_P4_IQ_PERFCTR4 0x00000310 -#define MSR_P4_IQ_PERFCTR5 0x00000311 -#define MSR_P4_BPU_CCCR0 0x00000360 -#define MSR_P4_BPU_CCCR1 0x00000361 -#define MSR_P4_BPU_CCCR2 0x00000362 -#define MSR_P4_BPU_CCCR3 0x00000363 -#define MSR_P4_MS_CCCR0 0x00000364 -#define MSR_P4_MS_CCCR1 0x00000365 -#define MSR_P4_MS_CCCR2 0x00000366 -#define MSR_P4_MS_CCCR3 0x00000367 -#define MSR_P4_FLAME_CCCR0 0x00000368 -#define MSR_P4_FLAME_CCCR1 0x00000369 -#define MSR_P4_FLAME_CCCR2 0x0000036a -#define MSR_P4_FLAME_CCCR3 0x0000036b -#define MSR_P4_IQ_CCCR0 0x0000036c -#define MSR_P4_IQ_CCCR1 0x0000036d -#define MSR_P4_IQ_CCCR2 0x0000036e -#define MSR_P4_IQ_CCCR3 0x0000036f -#define MSR_P4_IQ_CCCR4 0x00000370 -#define MSR_P4_IQ_CCCR5 0x00000371 -#define MSR_P4_ALF_ESCR0 0x000003ca -#define MSR_P4_ALF_ESCR1 0x000003cb -#define MSR_P4_BPU_ESCR0 0x000003b2 -#define MSR_P4_BPU_ESCR1 0x000003b3 -#define MSR_P4_BSU_ESCR0 0x000003a0 -#define MSR_P4_BSU_ESCR1 0x000003a1 -#define MSR_P4_CRU_ESCR0 0x000003b8 -#define MSR_P4_CRU_ESCR1 0x000003b9 -#define MSR_P4_CRU_ESCR2 0x000003cc -#define MSR_P4_CRU_ESCR3 0x000003cd -#define MSR_P4_CRU_ESCR4 0x000003e0 -#define MSR_P4_CRU_ESCR5 0x000003e1 -#define MSR_P4_DAC_ESCR0 0x000003a8 -#define MSR_P4_DAC_ESCR1 0x000003a9 -#define MSR_P4_FIRM_ESCR0 0x000003a4 -#define MSR_P4_FIRM_ESCR1 0x000003a5 -#define MSR_P4_FLAME_ESCR0 0x000003a6 -#define MSR_P4_FLAME_ESCR1 0x000003a7 -#define MSR_P4_FSB_ESCR0 0x000003a2 -#define MSR_P4_FSB_ESCR1 0x000003a3 -#define MSR_P4_IQ_ESCR0 0x000003ba -#define MSR_P4_IQ_ESCR1 0x000003bb -#define MSR_P4_IS_ESCR0 0x000003b4 -#define MSR_P4_IS_ESCR1 0x000003b5 -#define MSR_P4_ITLB_ESCR0 0x000003b6 -#define MSR_P4_ITLB_ESCR1 0x000003b7 -#define MSR_P4_IX_ESCR0 0x000003c8 -#define MSR_P4_IX_ESCR1 0x000003c9 -#define MSR_P4_MOB_ESCR0 0x000003aa -#define MSR_P4_MOB_ESCR1 0x000003ab -#define MSR_P4_MS_ESCR0 0x000003c0 -#define MSR_P4_MS_ESCR1 0x000003c1 -#define MSR_P4_PMH_ESCR0 0x000003ac -#define MSR_P4_PMH_ESCR1 0x000003ad -#define MSR_P4_RAT_ESCR0 0x000003bc -#define MSR_P4_RAT_ESCR1 0x000003bd -#define MSR_P4_SAAT_ESCR0 0x000003ae -#define MSR_P4_SAAT_ESCR1 0x000003af -#define MSR_P4_SSU_ESCR0 0x000003be -#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ - -#define MSR_P4_TBPU_ESCR0 0x000003c2 -#define MSR_P4_TBPU_ESCR1 0x000003c3 -#define MSR_P4_TC_ESCR0 0x000003c4 -#define MSR_P4_TC_ESCR1 0x000003c5 -#define MSR_P4_U2L_ESCR0 0x000003b0 -#define MSR_P4_U2L_ESCR1 0x000003b1 - -/* Netburst (P4) last-branch recording */ -#define MSR_P4_LER_FROM_LIP 0x000001d7 -#define MSR_P4_LER_TO_LIP 0x000001d8 -#define MSR_P4_LASTBRANCH_TOS 0x000001da -#define MSR_P4_LASTBRANCH_0 0x000001db -#define NUM_MSR_P4_LASTBRANCH 4 -#define MSR_P4_LASTBRANCH_0_FROM_LIP 0x00000680 -#define MSR_P4_LASTBRANCH_0_TO_LIP 0x000006c0 -#define NUM_MSR_P4_LASTBRANCH_FROM_TO 16 - -/* Core 2 and Atom last-branch recording */ -#define MSR_C2_LASTBRANCH_TOS 0x000001c9 -#define MSR_C2_LASTBRANCH_0_FROM_IP 0x00000040 -#define MSR_C2_LASTBRANCH_0_TO_IP 0x00000060 -#define NUM_MSR_C2_LASTBRANCH_FROM_TO 4 -#define NUM_MSR_ATOM_LASTBRANCH_FROM_TO 8 - -/* Nehalem (and newer) last-branch recording */ -#define MSR_NHL_LBR_SELECT 0x000001c8 -#define MSR_NHL_LASTBRANCH_TOS 0x000001c9 - -/* Skylake (and newer) last-branch recording */ -#define MSR_SKL_LASTBRANCH_0_FROM_IP 0x00000680 -#define MSR_SKL_LASTBRANCH_0_TO_IP 0x000006c0 -#define MSR_SKL_LASTBRANCH_0_INFO 0x00000dc0 -#define NUM_MSR_SKL_LASTBRANCH 32 - -/* 
Silvermont (and newer) last-branch recording */ -#define MSR_SM_LBR_SELECT 0x000001c8 -#define MSR_SM_LASTBRANCH_TOS 0x000001c9 - -/* Goldmont last-branch recording */ -#define MSR_GM_LASTBRANCH_0_FROM_IP 0x00000680 -#define MSR_GM_LASTBRANCH_0_TO_IP 0x000006c0 -#define NUM_MSR_GM_LASTBRANCH_FROM_TO 32 - -/* Intel Core-based CPU performance counters */ -#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 -#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a -#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b -#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d -#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e -#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f -#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 - -/* Intel cpuid spoofing MSRs */ -#define MSR_INTEL_MASK_V1_CPUID1 0x00000478 - -#define MSR_INTEL_MASK_V2_CPUID1 0x00000130 -#define MSR_INTEL_MASK_V2_CPUID80000001 0x00000131 - -#define MSR_INTEL_MASK_V3_CPUID1 0x00000132 -#define MSR_INTEL_MASK_V3_CPUID80000001 0x00000133 -#define MSR_INTEL_MASK_V3_CPUIDD_01 0x00000134 - -/* Intel cpuid faulting MSRs */ -#define MSR_INTEL_PLATFORM_INFO 0x000000ce -#define _MSR_PLATFORM_INFO_CPUID_FAULTING 31 -#define MSR_PLATFORM_INFO_CPUID_FAULTING (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING) - -#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140 -#define _MSR_MISC_FEATURES_CPUID_FAULTING 0 -#define MSR_MISC_FEATURES_CPUID_FAULTING (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING) - -#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 -#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 - -/* Interrupt Response Limit */ -#define MSR_PKGC3_IRTL 0x0000060a -#define MSR_PKGC6_IRTL 0x0000060b -#define MSR_PKGC7_IRTL 0x0000060c -#define MSR_PKGC8_IRTL 0x00000633 -#define MSR_PKGC9_IRTL 0x00000634 -#define MSR_PKGC10_IRTL 0x00000635 - -#endif /* __ASM_MSR_INDEX_H */ diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h deleted file mode 100644 index 1d3eca9063..0000000000 --- a/xen/include/asm-x86/msr.h +++ /dev/null @@ -1,381 +0,0 @@ -#ifndef __ASM_MSR_H -#define __ASM_MSR_H - -#include "msr-index.h" - -#include -#include -#include - -#include - -#include -#include -#include - -#define rdmsr(msr,val1,val2) \ - __asm__ __volatile__("rdmsr" \ - : "=a" (val1), "=d" (val2) \ - : "c" (msr)) - -#define rdmsrl(msr,val) do { unsigned long a__,b__; \ - __asm__ __volatile__("rdmsr" \ - : "=a" (a__), "=d" (b__) \ - : "c" (msr)); \ - val = a__ | ((u64)b__<<32); \ -} while(0) - -#define wrmsr(msr,val1,val2) \ - __asm__ __volatile__("wrmsr" \ - : /* no outputs */ \ - : "c" (msr), "a" (val1), "d" (val2)) - -static inline void wrmsrl(unsigned int msr, __u64 val) -{ - __u32 lo, hi; - lo = (__u32)val; - hi = (__u32)(val >> 32); - wrmsr(msr, lo, hi); -} - -/* rdmsr with exception handling */ -#define rdmsr_safe(msr,val) ({\ - int rc_; \ - uint32_t lo_, hi_; \ - __asm__ __volatile__( \ - "1: rdmsr\n2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: xorl %0,%0\n; xorl %1,%1\n" \ - " movl %5,%2\n; jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : "=a" (lo_), "=d" (hi_), "=&r" (rc_) \ - : "c" (msr), "2" (0), "i" (-EFAULT)); \ - val = lo_ | ((uint64_t)hi_ << 32); \ - rc_; }) - -/* wrmsr with exception handling */ -static inline int wrmsr_safe(unsigned int msr, uint64_t val) -{ - int rc; - uint32_t lo, hi; - lo = (uint32_t)val; - hi = (uint32_t)(val >> 32); - - __asm__ __volatile__( - "1: wrmsr\n2:\n" - ".section .fixup,\"ax\"\n" - "3: movl %5,%0\n; jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : "=&r" (rc) - : "c" (msr), "a" (lo), "d" (hi), "0" (0), "i" (-EFAULT)); - return rc; -} - -static inline 
uint64_t msr_fold(const struct cpu_user_regs *regs) -{ - return (regs->rdx << 32) | regs->eax; -} - -static inline void msr_split(struct cpu_user_regs *regs, uint64_t val) -{ - regs->rdx = val >> 32; - regs->rax = (uint32_t)val; -} - -static inline uint64_t rdtsc(void) -{ - uint32_t low, high; - - __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)); - - return ((uint64_t)high << 32) | low; -} - -static inline uint64_t rdtsc_ordered(void) -{ - /* - * The RDTSC instruction is not ordered relative to memory access. - * The Intel SDM and the AMD APM are both vague on this point, but - * empirically an RDTSC instruction can be speculatively executed - * before prior loads. An RDTSC immediately after an appropriate - * barrier appears to be ordered as a normal load, that is, it - * provides the same ordering guarantees as reading from a global - * memory location that some other imaginary CPU is updating - * continuously with a time stamp. - */ - alternative("lfence", "mfence", X86_FEATURE_MFENCE_RDTSC); - return rdtsc(); -} - -#define __write_tsc(val) wrmsrl(MSR_IA32_TSC, val) -#define write_tsc(val) ({ \ - /* Reliable TSCs are in lockstep across all CPUs. We should \ - * never write to them. */ \ - ASSERT(!boot_cpu_has(X86_FEATURE_TSC_RELIABLE)); \ - __write_tsc(val); \ -}) - -#define rdpmc(counter,low,high) \ - __asm__ __volatile__("rdpmc" \ - : "=a" (low), "=d" (high) \ - : "c" (counter)) - -/* - * On hardware supporting FSGSBASE, the value loaded into hardware is the - * guest kernel's choice for 64bit PV guests (Xen's choice for Idle, HVM and - * 32bit PV). - * - * Therefore, the {RD,WR}{FS,GS}BASE instructions are only safe to use if - * %cr4.fsgsbase is set. - */ -static inline unsigned long __rdfsbase(void) -{ - unsigned long base; - -#ifdef HAVE_AS_FSGSBASE - asm volatile ( "rdfsbase %0" : "=r" (base) ); -#else - asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc0" : "=a" (base) ); -#endif - - return base; -} - -static inline unsigned long __rdgsbase(void) -{ - unsigned long base; - -#ifdef HAVE_AS_FSGSBASE - asm volatile ( "rdgsbase %0" : "=r" (base) ); -#else - asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc8" : "=a" (base) ); -#endif - - return base; -} - -static inline void __wrfsbase(unsigned long base) -{ -#ifdef HAVE_AS_FSGSBASE - asm volatile ( "wrfsbase %0" :: "r" (base) ); -#else - asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd0" :: "a" (base) ); -#endif -} - -static inline void __wrgsbase(unsigned long base) -{ -#ifdef HAVE_AS_FSGSBASE - asm volatile ( "wrgsbase %0" :: "r" (base) ); -#else - asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8" :: "a" (base) ); -#endif -} - -static inline unsigned long read_fs_base(void) -{ - unsigned long base; - - if ( read_cr4() & X86_CR4_FSGSBASE ) - return __rdfsbase(); - - rdmsrl(MSR_FS_BASE, base); - - return base; -} - -static inline unsigned long read_gs_base(void) -{ - unsigned long base; - - if ( read_cr4() & X86_CR4_FSGSBASE ) - return __rdgsbase(); - - rdmsrl(MSR_GS_BASE, base); - - return base; -} - -static inline unsigned long read_gs_shadow(void) -{ - unsigned long base; - - if ( read_cr4() & X86_CR4_FSGSBASE ) - { - asm volatile ( "swapgs" ); - base = __rdgsbase(); - asm volatile ( "swapgs" ); - } - else - rdmsrl(MSR_SHADOW_GS_BASE, base); - - return base; -} - -static inline void write_fs_base(unsigned long base) -{ - if ( read_cr4() & X86_CR4_FSGSBASE ) - __wrfsbase(base); - else - wrmsrl(MSR_FS_BASE, base); -} - -static inline void write_gs_base(unsigned long base) -{ - if ( read_cr4() & X86_CR4_FSGSBASE ) - 
__wrgsbase(base); - else - wrmsrl(MSR_GS_BASE, base); -} - -static inline void write_gs_shadow(unsigned long base) -{ - if ( read_cr4() & X86_CR4_FSGSBASE ) - { - asm volatile ( "swapgs\n\t" -#ifdef HAVE_AS_FSGSBASE - "wrgsbase %0\n\t" - "swapgs" - :: "r" (base) ); -#else - ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8\n\t" - "swapgs" - :: "a" (base) ); -#endif - } - else - wrmsrl(MSR_SHADOW_GS_BASE, base); -} - -DECLARE_PER_CPU(uint64_t, efer); -static inline uint64_t read_efer(void) -{ - return this_cpu(efer); -} - -static inline void write_efer(uint64_t val) -{ - this_cpu(efer) = val; - wrmsrl(MSR_EFER, val); -} - -extern unsigned int ler_msr; - -DECLARE_PER_CPU(uint32_t, tsc_aux); - -/* Lazy update of MSR_TSC_AUX */ -static inline void wrmsr_tsc_aux(uint32_t val) -{ - uint32_t *this_tsc_aux = &this_cpu(tsc_aux); - - if ( *this_tsc_aux != val ) - { - wrmsr(MSR_TSC_AUX, val, 0); - *this_tsc_aux = val; - } -} - -extern struct msr_policy raw_msr_policy, - host_msr_policy, - pv_max_msr_policy, - pv_def_msr_policy, - hvm_max_msr_policy, - hvm_def_msr_policy; - -/* Container object for per-vCPU MSRs */ -struct vcpu_msrs -{ - /* 0x00000048 - MSR_SPEC_CTRL */ - struct { - uint32_t raw; - } spec_ctrl; - - /* - * 0x00000140 - MSR_INTEL_MISC_FEATURES_ENABLES - * - * This MSR is non-architectural, but for simplicity we allow it to be read - * unconditionally. The CPUID Faulting bit is the only writeable bit, and - * only if enumerated by MSR_PLATFORM_INFO. - */ - union { - uint32_t raw; - struct { - bool cpuid_faulting:1; - }; - } misc_features_enables; - - /* - * 0x00000560 ... 57x - MSR_RTIT_* - * - * "Real Time Instruction Trace", now called Processor Trace. - * - * These MSRs are not exposed to guests. They are controlled by Xen - * behind the scenes, when vmtrace is enabled for the domain. - * - * MSR_RTIT_OUTPUT_BASE not stored here. It is fixed per vcpu, and - * derived from v->vmtrace.buf. - */ - struct { - /* - * Placed in the MSR load/save lists. Only modified by hypercall in - * the common case. - */ - uint64_t ctl; - - /* - * Updated by hardware in non-root mode. Synchronised here on vcpu - * context switch. - */ - uint64_t status; - union { - uint64_t output_mask; - struct { - uint32_t output_limit; - uint32_t output_offset; - }; - }; - } rtit; - - /* 0x00000da0 - MSR_IA32_XSS */ - struct { - uint64_t raw; - } xss; - - /* - * 0xc0000103 - MSR_TSC_AUX - * - * Value is guest chosen, and always loaded in vcpu context. Guests have - * no direct MSR access, and the value is accessible to userspace with the - * RDTSCP and RDPID instructions. - */ - uint32_t tsc_aux; - - /* - * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK - * - * Loaded into hardware for guests which have active %dr7 settings. - * Furthermore, HVM guests are offered direct access, meaning that the - * values here may be stale in current context. - */ - uint32_t dr_mask[4]; -}; - -void init_guest_msr_policy(void); -int init_domain_msr_policy(struct domain *d); -int init_vcpu_msr_policy(struct vcpu *v); - -/* - * Below functions can return X86EMUL_UNHANDLEABLE which means that MSR is - * not (yet) handled by it and must be processed by legacy handlers. Such - * behaviour is needed for the transition period until all rd/wrmsr are handled - * by the new MSR infrastructure. - * - * These functions are also used by the migration logic, so need to cope with - * being used outside of v's context.
- */ -int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val); -int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val); - -#endif /* __ASM_MSR_H */ diff --git a/xen/include/asm-x86/mtrr.h b/xen/include/asm-x86/mtrr.h deleted file mode 100644 index e0fd1005ce..0000000000 --- a/xen/include/asm-x86/mtrr.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef __ASM_X86_MTRR_H__ -#define __ASM_X86_MTRR_H__ - -#include - -/* These are the region types. They match the architectural specification. */ -#define MTRR_TYPE_UNCACHABLE 0 -#define MTRR_TYPE_WRCOMB 1 -#define MTRR_TYPE_WRTHROUGH 4 -#define MTRR_TYPE_WRPROT 5 -#define MTRR_TYPE_WRBACK 6 -#define MTRR_NUM_TYPES 7 -#define MEMORY_NUM_TYPES MTRR_NUM_TYPES -#define NO_HARDCODE_MEM_TYPE MTRR_NUM_TYPES - -#define NORMAL_CACHE_MODE 0 -#define NO_FILL_CACHE_MODE 2 - -enum { - PAT_TYPE_UNCACHABLE=0, - PAT_TYPE_WRCOMB=1, - PAT_TYPE_WRTHROUGH=4, - PAT_TYPE_WRPROT=5, - PAT_TYPE_WRBACK=6, - PAT_TYPE_UC_MINUS=7, - PAT_TYPE_NUMS -}; - -#define INVALID_MEM_TYPE PAT_TYPE_NUMS - -/* In the Intel processor's MTRR interface, the MTRR type is always held in - an 8 bit field: */ -typedef u8 mtrr_type; - -#define MTRR_PHYSMASK_VALID_BIT 11 -#define MTRR_PHYSMASK_VALID (1 << MTRR_PHYSMASK_VALID_BIT) -#define MTRR_PHYSMASK_SHIFT 12 -#define MTRR_PHYSBASE_TYPE_MASK 0xff -#define MTRR_PHYSBASE_SHIFT 12 -/* Number of variable range MSR pairs we emulate for HVM guests: */ -#define MTRR_VCNT 8 -/* Maximum number of variable range MSR pairs if FE is supported. */ -#define MTRR_VCNT_MAX ((MSR_MTRRfix64K_00000 - \ - MSR_IA32_MTRR_PHYSBASE(0)) / 2) - -struct mtrr_var_range { - uint64_t base; - uint64_t mask; -}; - -#define NUM_FIXED_RANGES 88 -#define NUM_FIXED_MSR 11 -struct mtrr_state { - struct mtrr_var_range *var_ranges; - mtrr_type fixed_ranges[NUM_FIXED_RANGES]; - bool enabled; - bool fixed_enabled; - bool have_fixed; - mtrr_type def_type; - - u64 mtrr_cap; - /* ranges in var MSRs are overlapped or not:0(no overlapped) */ - bool_t overlapped; -}; -extern struct mtrr_state mtrr_state; - -extern void mtrr_save_fixed_ranges(void *); -extern void mtrr_save_state(void); -extern int mtrr_add(unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_add_page(unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_del(int reg, unsigned long base, unsigned long size); -extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); -extern int mtrr_get_type(const struct mtrr_state *m, paddr_t pa, - unsigned int order); -extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); -extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr, - paddr_t spaddr, uint8_t gmtrr_mtype); -extern unsigned char pat_type_2_pte_flags(unsigned char pat_type); -extern int hold_mtrr_updates_on_aps; -extern void mtrr_aps_sync_begin(void); -extern void mtrr_aps_sync_end(void); -extern void mtrr_bp_restore(void); - -extern bool_t mtrr_var_range_msr_set(struct domain *, struct mtrr_state *, - uint32_t msr, uint64_t msr_content); -extern bool_t mtrr_fix_range_msr_set(struct domain *, struct mtrr_state *, - uint32_t row, uint64_t msr_content); -extern bool_t mtrr_def_type_msr_set(struct domain *, struct mtrr_state *, - uint64_t msr_content); -#ifdef CONFIG_HVM -extern void memory_type_changed(struct domain *); -#else -static inline void memory_type_changed(struct domain *d) {} -#endif - -extern bool_t pat_msr_set(uint64_t *pat, uint64_t msr); - -bool is_var_mtrr_overlapped(const struct mtrr_state *m); 
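[Editor's note: an illustrative sketch, not part of this patch. It shows how a caller might honour the guest_rdmsr() contract documented at the end of msr.h above, falling back when the new MSR infrastructure returns X86EMUL_UNHANDLEABLE; the name legacy_rdmsr() is hypothetical.]

    static int read_msr(struct vcpu *v, uint32_t msr, uint64_t *val)
    {
        int rc = guest_rdmsr(v, msr, val);

        /* Not (yet) handled by the new infrastructure: try the legacy path. */
        if ( rc == X86EMUL_UNHANDLEABLE )
            rc = legacy_rdmsr(v, msr, val); /* hypothetical legacy handler */

        return rc;
    }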
-bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs); - -#endif /* __ASM_X86_MTRR_H__ */ diff --git a/xen/include/asm-x86/multicall.h b/xen/include/asm-x86/multicall.h deleted file mode 100644 index 7e1d4c121a..0000000000 --- a/xen/include/asm-x86/multicall.h +++ /dev/null @@ -1,12 +0,0 @@ -/****************************************************************************** - * asm-x86/multicall.h - */ - -#ifndef __ASM_X86_MULTICALL_H__ -#define __ASM_X86_MULTICALL_H__ - -#include - -typeof(arch_do_multicall_call) pv_do_multicall_call, hvm_do_multicall_call; - -#endif /* __ASM_X86_MULTICALL_H__ */ diff --git a/xen/include/asm-x86/mwait.h b/xen/include/asm-x86/mwait.h deleted file mode 100644 index f377d9fdca..0000000000 --- a/xen/include/asm-x86/mwait.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __ASM_X86_MWAIT_H__ -#define __ASM_X86_MWAIT_H__ - -#include - -#define MWAIT_SUBSTATE_MASK 0xf -#define MWAIT_CSTATE_MASK 0xf -#define MWAIT_SUBSTATE_SIZE 4 - -#define CPUID_MWAIT_LEAF 5 -#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 -#define CPUID5_ECX_INTERRUPT_BREAK 0x2 - -#define MWAIT_ECX_INTERRUPT_BREAK 0x1 - -void mwait_idle_with_hints(unsigned int eax, unsigned int ecx); -bool mwait_pc10_supported(void); - -#endif /* __ASM_X86_MWAIT_H__ */ diff --git a/xen/include/asm-x86/nmi.h b/xen/include/asm-x86/nmi.h deleted file mode 100644 index 9a5da14162..0000000000 --- a/xen/include/asm-x86/nmi.h +++ /dev/null @@ -1,46 +0,0 @@ - -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include - -struct cpu_user_regs; - -/* Watchdog boolean from the command line */ -extern bool opt_watchdog; - -/* Watchdog force parameter from the command line */ -extern bool watchdog_force; - -/* CPU to handle platform NMI */ -extern const unsigned int nmi_cpu; - -typedef int nmi_callback_t(const struct cpu_user_regs *regs, int cpu); - -/** - * set_nmi_callback - * - * Set a handler for an NMI. Only one handler may be - * set. Return the old nmi callback handler. - */ -nmi_callback_t *set_nmi_callback(nmi_callback_t *callback); - -/** - * unset_nmi_callback - * - * Remove the handler previously set. - */ -void unset_nmi_callback(void); - -DECLARE_PER_CPU(unsigned int, nmi_count); - -/** - * trigger_nmi_continuation - * - * Schedule continuation to be started in interrupt context after NMI handling. - */ -void trigger_nmi_continuation(void); - -/* Check for NMI continuation pending. */ -bool nmi_check_continuation(void); -#endif /* ASM_NMI_H */ diff --git a/xen/include/asm-x86/nops.h b/xen/include/asm-x86/nops.h deleted file mode 100644 index 1a46b97aff..0000000000 --- a/xen/include/asm-x86/nops.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef __X86_ASM_NOPS_H__ -#define __X86_ASM_NOPS_H__ - -/* - * Define nops for use with alternative(). - */ - -#define NOP_DS_PREFIX 0x3e - -/* - * Opteron 64bit nops - * 1: nop - * 2: osp nop - * 3: osp osp nop - * 4: osp osp osp nop - */ -#define K8_NOP1 0x90 -#define K8_NOP2 0x66,K8_NOP1 -#define K8_NOP3 0x66,K8_NOP2 -#define K8_NOP4 0x66,K8_NOP3 -#define K8_NOP5 K8_NOP3,K8_NOP2 -#define K8_NOP6 K8_NOP3,K8_NOP3 -#define K8_NOP7 K8_NOP4,K8_NOP3 -#define K8_NOP8 K8_NOP4,K8_NOP4 -#define K8_NOP9 K8_NOP3,K8_NOP3,K8_NOP3 - -/* - * P6 nops - * uses eax dependencies (Intel-recommended choice) - * 1: nop - * 2: osp nop - * 3: nopl (%eax) - * 4: nopl 0x00(%eax) - * 5: nopl 0x00(%eax,%eax,1) - * 6: osp nopl 0x00(%eax,%eax,1) - * 7: nopl 0x00000000(%eax) - * 8: nopl 0x00000000(%eax,%eax,1) - * 9: nopw 0x00000000(%eax,%eax,1) - * Note: All the above are assumed to be a single instruction. 
- * There is kernel code that depends on this. - */ -#define P6_NOP1 0x90 -#define P6_NOP2 0x66,0x90 -#define P6_NOP3 0x0f,0x1f,0x00 -#define P6_NOP4 0x0f,0x1f,0x40,0 -#define P6_NOP5 0x0f,0x1f,0x44,0x00,0 -#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 -#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 -#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 -#define P6_NOP9 0x66,0x0f,0x1f,0x84,0x00,0,0,0,0 - -#ifdef __ASSEMBLY__ -#define _ASM_MK_NOP(x) .byte x -#else -#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" -#endif - -#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) -#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) -#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) -#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) -#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) -#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) -#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) -#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) -#define ASM_NOP9 _ASM_MK_NOP(P6_NOP9) - -#define ASM_NOP_MAX 9 - -#endif /* __X86_ASM_NOPS_H__ */ diff --git a/xen/include/asm-x86/nospec.h b/xen/include/asm-x86/nospec.h deleted file mode 100644 index 5312ae4c6f..0000000000 --- a/xen/include/asm-x86/nospec.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. */ - -#ifndef _ASM_X86_NOSPEC_H -#define _ASM_X86_NOSPEC_H - -#include - -/* Allow to insert a read memory barrier into conditionals */ -static always_inline bool barrier_nospec_true(void) -{ -#ifdef CONFIG_SPECULATIVE_HARDEN_BRANCH - alternative("lfence", "", X86_FEATURE_SC_NO_BRANCH_HARDEN); -#endif - return true; -} - -/* Allow to protect evaluation of conditionals with respect to speculation */ -static always_inline bool evaluate_nospec(bool condition) -{ - return condition ? barrier_nospec_true() : !barrier_nospec_true(); -} - -/* Allow to block speculative execution in generic code */ -static always_inline void block_speculation(void) -{ - barrier_nospec_true(); -} - -#endif /* _ASM_X86_NOSPEC_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/numa.h b/xen/include/asm-x86/numa.h deleted file mode 100644 index bada2c0bb9..0000000000 --- a/xen/include/asm-x86/numa.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef _ASM_X8664_NUMA_H -#define _ASM_X8664_NUMA_H 1 - -#include - -#define NODES_SHIFT 6 - -typedef u8 nodeid_t; - -extern int srat_rev; - -extern nodeid_t cpu_to_node[NR_CPUS]; -extern cpumask_t node_to_cpumask[]; - -#define cpu_to_node(cpu) (cpu_to_node[cpu]) -#define parent_node(node) (node) -#define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) -#define node_to_cpumask(node) (node_to_cpumask[node]) - -struct node { - u64 start,end; -}; - -extern int compute_hash_shift(struct node *nodes, int numnodes, - nodeid_t *nodeids); -extern nodeid_t pxm_to_node(unsigned int pxm); - -#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) -#define VIRTUAL_BUG_ON(x) - -extern void numa_add_cpu(int cpu); -extern void numa_init_array(void); -extern bool numa_off; - - -extern int srat_disabled(void); -extern void numa_set_node(int cpu, nodeid_t node); -extern nodeid_t setup_node(unsigned int pxm); -extern void srat_detect_node(int cpu); - -extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end); -extern nodeid_t apicid_to_node[]; -extern void init_cpu_to_node(void); - -static inline void clear_node_cpumask(int cpu) -{ - cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]); -} - -/* Simple perfect hash to map pdx to node numbers */ -extern int memnode_shift; 
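[Editor's note: a minimal usage sketch, not part of this patch, of the nospec.h helpers removed above. evaluate_nospec() is meant to guard exactly this shape of code: a bounds check whose outcome must not be bypassed speculatively before the dependent array access. The names table_lookup, table and nr are hypothetical.]

    static uint64_t table_lookup(const uint64_t *table, unsigned int nr,
                                 unsigned int idx)
    {
        /*
         * With CONFIG_SPECULATIVE_HARDEN_BRANCH, evaluate_nospec() may emit
         * an lfence, preventing a mispredicted bounds check from letting the
         * array access run ahead speculatively.
         */
        if ( evaluate_nospec(idx < nr) )
            return table[idx];

        return 0;
    }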
-extern unsigned long memnodemapsize; -extern u8 *memnodemap; - -struct node_data { - unsigned long node_start_pfn; - unsigned long node_spanned_pages; -}; - -extern struct node_data node_data[]; - -static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr) -{ - nodeid_t nid; - VIRTUAL_BUG_ON((paddr_to_pdx(addr) >> memnode_shift) >= memnodemapsize); - nid = memnodemap[paddr_to_pdx(addr) >> memnode_shift]; - VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); - return nid; -} - -#define NODE_DATA(nid) (&(node_data[nid])) - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ - NODE_DATA(nid)->node_spanned_pages) - -extern int valid_numa_range(u64 start, u64 end, nodeid_t node); - -void srat_parse_regions(u64 addr); -extern u8 __node_distance(nodeid_t a, nodeid_t b); -unsigned int arch_get_dma_bitsize(void); - -#endif diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h deleted file mode 100644 index 357a808748..0000000000 --- a/xen/include/asm-x86/p2m.h +++ /dev/null @@ -1,1022 +0,0 @@ -/****************************************************************************** - * include/asm-x86/paging.h - * - * physical-to-machine mappings for automatically-translated domains. - * - * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla) - * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) - * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc. - * Parts of this code are Copyright (c) 2006 by Michael A Fetterman - * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ - -#ifndef _XEN_ASM_X86_P2M_H -#define _XEN_ASM_X86_P2M_H - -#include -#include -#include -#include /* for pagetable_t */ - -/* Debugging and auditing of the P2M code? */ -#if !defined(NDEBUG) && defined(CONFIG_HVM) -#define P2M_AUDIT 1 -#else -#define P2M_AUDIT 0 -#endif -#define P2M_DEBUGGING 0 - -extern bool_t opt_hap_1gb, opt_hap_2mb; - -/* - * The upper levels of the p2m pagetable always contain full rights; all - * variation in the access control bits is made in the level-1 PTEs. - * - * In addition to the phys-to-machine translation, each p2m PTE contains - * *type* information about the gfn it translates, helping Xen to decide - * on the correct course of action when handling a page-fault to that - * guest frame. We store the type in the "available" bits of the PTEs - * in the table, which gives us 8 possible types on 32-bit systems. - * Further expansions of the type system will only be supported on - * 64-bit Xen. - */ - -/* - * AMD IOMMU: When we share p2m table with iommu, bit 52 -bit 58 in pte - * cannot be non-zero, otherwise, hardware generates io page faults when - * device access those pages. Therefore, p2m_ram_rw has to be defined as 0. 
- */ -typedef enum { - p2m_ram_rw = 0, /* Normal read/write guest RAM */ - p2m_invalid = 1, /* Nothing mapped here */ - p2m_ram_logdirty = 2, /* Temporarily read-only for log-dirty */ - p2m_ram_ro = 3, /* Read-only; writes are silently dropped */ - p2m_mmio_dm = 4, /* Reads and writes go to the device model */ - p2m_mmio_direct = 5, /* Read/write mapping of genuine MMIO area */ - p2m_populate_on_demand = 6, /* Place-holder for empty memory */ - - /* Although these are defined in all builds, they can only - * be used in 64-bit builds */ - p2m_grant_map_rw = 7, /* Read/write grant mapping */ - p2m_grant_map_ro = 8, /* Read-only grant mapping */ - p2m_ram_paging_out = 9, /* Memory that is being paged out */ - p2m_ram_paged = 10, /* Memory that has been paged out */ - p2m_ram_paging_in = 11, /* Memory that is being paged in */ - p2m_ram_shared = 12, /* Shared or sharable memory */ - p2m_ram_broken = 13, /* Broken page, accesses cause domain crash */ - p2m_map_foreign = 14, /* ram pages from foreign domain */ - p2m_ioreq_server = 15, -} p2m_type_t; - -/* Modifiers to the query */ -typedef unsigned int p2m_query_t; -#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */ -#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */ - -/* We use bitmaps and masks to handle groups of types */ -#define p2m_to_mask(_t) (1UL << (_t)) - -/* RAM types, which map to real machine frames */ -#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) \ - | p2m_to_mask(p2m_ram_ro) \ - | p2m_to_mask(p2m_ram_paging_out) \ - | p2m_to_mask(p2m_ram_paged) \ - | p2m_to_mask(p2m_ram_paging_in) \ - | p2m_to_mask(p2m_ram_shared) \ - | p2m_to_mask(p2m_ioreq_server)) - -/* Types that represent a physmap hole that is ok to replace with a shared - * entry */ -#define P2M_HOLE_TYPES (p2m_to_mask(p2m_mmio_dm) \ - | p2m_to_mask(p2m_invalid) \ - | p2m_to_mask(p2m_ram_paging_in) \ - | p2m_to_mask(p2m_ram_paged)) - -/* Grant mapping types, which map to a real machine frame in another - * VM */ -#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw) \ - | p2m_to_mask(p2m_grant_map_ro) ) - -/* MMIO types, which don't have to map to anything in the frametable */ -#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm) \ - | p2m_to_mask(p2m_mmio_direct)) - -/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */ -#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty) \ - | p2m_to_mask(p2m_ram_ro) \ - | p2m_to_mask(p2m_grant_map_ro) \ - | p2m_to_mask(p2m_ram_shared)) - -/* Write-discard types, which should discard the write operations */ -#define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \ - | p2m_to_mask(p2m_grant_map_ro)) - -/* Types that can be subject to bulk transitions.
*/ -#define P2M_CHANGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) \ - | p2m_to_mask(p2m_ioreq_server) ) - -#define P2M_POD_TYPES (p2m_to_mask(p2m_populate_on_demand)) - -/* Pageable types */ -#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) ) - -#ifdef CONFIG_MEM_PAGING -#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out) \ - | p2m_to_mask(p2m_ram_paged) \ - | p2m_to_mask(p2m_ram_paging_in)) - -#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged)) -#else -#define P2M_PAGING_TYPES 0 -#define P2M_PAGED_TYPES 0 -#endif - -/* Shared types */ -/* XXX: Sharable types could include p2m_ram_ro too, but we would need to - * reinit the type correctly after fault */ -#define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw) \ - | p2m_to_mask(p2m_ram_logdirty) ) -#define P2M_SHARED_TYPES (p2m_to_mask(p2m_ram_shared)) - -/* Types established/cleaned up via special accessors. */ -#define P2M_SPECIAL_TYPES (P2M_GRANT_TYPES | \ - p2m_to_mask(p2m_map_foreign) | \ - p2m_to_mask(p2m_mmio_direct)) - -/* Valid types not necessarily associated with a (valid) MFN. */ -#define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES \ - | p2m_to_mask(p2m_mmio_direct) \ - | P2M_PAGING_TYPES) - -/* Broken type: the frame backing this pfn has failed in hardware - * and must not be touched. */ -#define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken)) - -/* Useful predicates */ -#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES) -#define p2m_is_hole(_t) (p2m_to_mask(_t) & P2M_HOLE_TYPES) -#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES) -#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES) -#define p2m_is_discard_write(_t) (p2m_to_mask(_t) & P2M_DISCARD_WRITE_TYPES) -#define p2m_is_changeable(_t) (p2m_to_mask(_t) & P2M_CHANGEABLE_TYPES) -#define p2m_is_pod(_t) (p2m_to_mask(_t) & P2M_POD_TYPES) -#define p2m_is_grant(_t) (p2m_to_mask(_t) & P2M_GRANT_TYPES) -/* Grant types are *not* considered valid, because they can be - unmapped at any time and, unless you happen to be the shadow or p2m - implementations, there's no way of synchronising against that. */ -#define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES)) -#define p2m_has_emt(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct))) -#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES) -#define p2m_is_paging(_t) (p2m_to_mask(_t) & P2M_PAGING_TYPES) -#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES) -#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES) -#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES) -#define p2m_is_special(_t) (p2m_to_mask(_t) & P2M_SPECIAL_TYPES) -#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES) -#define p2m_is_foreign(_t) (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign)) - -#define p2m_is_any_ram(_t) (p2m_to_mask(_t) & \ - (P2M_RAM_TYPES | P2M_GRANT_TYPES | \ - p2m_to_mask(p2m_map_foreign))) - -#define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES) - -typedef enum { - p2m_host, - p2m_nested, - p2m_alternate, -} p2m_class_t; - -/* Per-p2m-table state */ -struct p2m_domain { - /* Lock that protects updates to the p2m */ - mm_rwlock_t lock; - - /* Shadow translated domain: p2m mapping */ - pagetable_t phys_table; - - /* - * Same as a domain's dirty_cpumask but limited to - * this p2m and those physical cpus whose vcpu's are in - * guestmode. 
- */ - cpumask_var_t dirty_cpumask; - - struct domain *domain; /* back pointer to domain */ - - p2m_class_t p2m_class; /* host/nested/alternate */ - - /* - * Default P2M access type for each page in the the domain: new pages, - * swapped in pages, cleared pages, and pages that are ambiguously - * retyped get this access type. See definition of p2m_access_t. - */ - p2m_access_t default_access; - - /* Pages used to construct the p2m */ - struct page_list_head pages; - - /* Host p2m: Log-dirty ranges registered for the domain. */ - struct rangeset *logdirty_ranges; - - /* Host p2m: Global log-dirty mode enabled for the domain. */ - bool global_logdirty; - -#ifdef CONFIG_HVM - /* Alternate p2m: count of vcpu's currently using this p2m. */ - atomic_t active_vcpus; - - int (*set_entry)(struct p2m_domain *p2m, - gfn_t gfn, - mfn_t mfn, unsigned int page_order, - p2m_type_t p2mt, - p2m_access_t p2ma, - int sve); - mfn_t (*get_entry)(struct p2m_domain *p2m, - gfn_t gfn, - p2m_type_t *p2mt, - p2m_access_t *p2ma, - p2m_query_t q, - unsigned int *page_order, - bool_t *sve); - int (*recalc)(struct p2m_domain *p2m, - unsigned long gfn); - void (*enable_hardware_log_dirty)(struct p2m_domain *p2m); - void (*disable_hardware_log_dirty)(struct p2m_domain *p2m); - void (*flush_hardware_cached_dirty)(struct p2m_domain *p2m); - void (*change_entry_type_global)(struct p2m_domain *p2m, - p2m_type_t ot, - p2m_type_t nt); - int (*change_entry_type_range)(struct p2m_domain *p2m, - p2m_type_t ot, p2m_type_t nt, - unsigned long first_gfn, - unsigned long last_gfn); - void (*memory_type_changed)(struct p2m_domain *p2m); - void (*write_p2m_entry_pre)(struct domain *d, - unsigned long gfn, - l1_pgentry_t old, - l1_pgentry_t new, - unsigned int level); - void (*write_p2m_entry_post)(struct p2m_domain *p2m, - unsigned int oflags); -#endif -#if P2M_AUDIT - long (*audit_p2m)(struct p2m_domain *p2m); -#endif - - /* - * P2M updates may require TLBs to be flushed (invalidated). - * - * If 'defer_flush' is set, flushes may be deferred by setting - * 'need_flush' and then flushing in 'tlb_flush()'. - * - * 'tlb_flush()' is only called if 'need_flush' was set. - * - * If a flush may be being deferred but an immediate flush is - * required (e.g., if a page is being freed to pool other than the - * domheap), call p2m_tlb_flush_sync(). - */ - void (*tlb_flush)(struct p2m_domain *p2m); - unsigned int defer_flush; - bool_t need_flush; - - /* If true, and an access fault comes in and there is no vm_event listener, - * pause domain. Otherwise, remove access restrictions. */ - bool_t access_required; - - /* Highest guest frame that's ever been mapped in the p2m */ - unsigned long max_mapped_pfn; - - /* - * Alternate p2m's only: range of gfn's for which underlying - * mfn may have duplicate mappings - */ - unsigned long min_remapped_gfn; - unsigned long max_remapped_gfn; - -#ifdef CONFIG_HVM - /* Populate-on-demand variables - * All variables are protected with the pod lock. We cannot rely on - * the p2m lock if it's turned into a fine-grained lock. - * We only use the domain page_alloc lock for additions and - * deletions to the domain's page list. Because we use it nested - * within the PoD lock, we enforce it's ordering (by remembering - * the unlock level in the arch_domain sub struct). 
 */
-    struct {
-        struct page_list_head super,   /* List of superpages           */
-                         single;       /* Non-super lists              */
-        long             count,        /* # of pages in cache lists    */
-                         entry_count;  /* # of pages in p2m marked pod */
-        gfn_t            reclaim_single; /* Last gfn of a scan */
-        gfn_t            max_guest;    /* gfn of max guest demand-populate */
-
-        /*
-         * Tracking of the most recently populated PoD pages, for eager
-         * reclamation.
-         */
-        struct pod_mrp_list {
-#define NR_POD_MRP_ENTRIES 32
-
-/* Encode ORDER_2M superpage in top bit of GFN */
-#define POD_LAST_SUPERPAGE (gfn_x(INVALID_GFN) & ~(gfn_x(INVALID_GFN) >> 1))
-
-            unsigned long list[NR_POD_MRP_ENTRIES];
-            unsigned int idx;
-        } mrp;
-        mm_lock_t        lock;         /* Locking of private pod structs, *
-                                        * not relying on the p2m lock.    */
-    } pod;
-
-    /*
-     * Host p2m: when this flag is set, don't flush all the nested-p2m
-     * tables on every host-p2m change.  The setter of this flag
-     * is responsible for performing the full flush before releasing the
-     * host p2m's lock.
-     */
-    bool defer_nested_flush;
-
-    /*
-     * Nested p2ms only: nested p2m base value that this p2m shadows.
-     * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
-     * needs both the per-p2m lock and the per-domain nestedp2m lock
-     * to set it to any other value.
-     */
-#define P2M_BASE_EADDR     (~0ULL)
-    uint64_t           np2m_base;
-    uint64_t           np2m_generation;
-
-    /*
-     * Nested p2ms: linked list of np2ms allocated to this domain.
-     * The host p2m holds the head of the list and the np2ms are
-     * threaded on in LRU order.
-     */
-    struct list_head   np2m_list;
-#endif
-
-    union {
-        struct ept_data ept;
-        /* NPT-equivalent structure could be added here. */
-    };
-
-    struct {
-        spinlock_t lock;
-        /*
-         * ioreq server that is responsible for the emulation of
-         * gfns with specific p2m type (for now, p2m_ioreq_server).
-         */
-        struct ioreq_server *server;
-        /*
-         * flags specifies whether read, write or both operations
-         * are to be emulated by an ioreq server.
-         */
-        unsigned int flags;
-        unsigned long entry_count;
-    } ioreq;
-};
-
-/* get host p2m table */
-#define p2m_get_hostp2m(d)      ((d)->arch.p2m)
-
-/* All common type definitions should live ahead of this inclusion. */
-#ifdef _XEN_P2M_COMMON_H
-# error "xen/p2m-common.h should not be included directly"
-#endif
-#include <xen/p2m-common.h>
-
-static inline bool arch_acquire_resource_check(struct domain *d)
-{
-    /*
-     * FIXME: Until foreign pages inserted into the P2M are properly
-     * reference counted, it is unsafe to allow mapping of
-     * resource pages unless the caller is the hardware domain
-     * (see set_foreign_p2m_entry()).
-     */
-    return !paging_mode_translate(d) || is_hardware_domain(d);
-}
-
-/*
- * Updates vCPU's np2m to match its np2m_base in VMCx12 and returns that np2m.
- */
-struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v);
-/* Similar to the above except that returned p2m is still write-locked */
-struct p2m_domain *p2m_get_nestedp2m_locked(struct vcpu *v);
-
-/* If vcpu is in host mode then behaviour matches p2m_get_hostp2m().
- * If vcpu is in guest mode then behaviour matches p2m_get_nestedp2m().
- */ -struct p2m_domain *p2m_get_p2m(struct vcpu *v); - -#define NP2M_SCHEDLE_IN 0 -#define NP2M_SCHEDLE_OUT 1 - -#ifdef CONFIG_HVM -void np2m_schedule(int dir); -#else -static inline void np2m_schedule(int dir) {} -#endif - -static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m) -{ - return p2m->p2m_class == p2m_host; -} - -static inline bool_t p2m_is_nestedp2m(const struct p2m_domain *p2m) -{ - return p2m->p2m_class == p2m_nested; -} - -static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m) -{ - return p2m->p2m_class == p2m_alternate; -} - -#define p2m_get_pagetable(p2m) ((p2m)->phys_table) - -/* - * Ensure any deferred p2m TLB flush has been completed on all VCPUs. - */ -void p2m_tlb_flush_sync(struct p2m_domain *p2m); -void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m); - -/**** p2m query accessors. They lock p2m_lock, and thus serialize - * lookups wrt modifications. They _do not_ release the lock on exit. - * After calling any of the variants below, caller needs to use - * put_gfn. ****/ - -mfn_t __nonnull(3, 4) __get_gfn_type_access( - struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, - p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked); - -/* Read a particular P2M table, mapping pages as we go. Most callers - * should _not_ call this directly; use the other get_gfn* functions - * below unless you know you want to walk a p2m that isn't a domain's - * main one. - * If the lookup succeeds, the return value is != INVALID_MFN and - * *page_order is filled in with the order of the superpage (if any) that - * the entry was found in. */ -static inline mfn_t __nonnull(3, 4) get_gfn_type_access( - struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, - p2m_access_t *a, p2m_query_t q, unsigned int *page_order) -{ - return __get_gfn_type_access(p2m, gfn, t, a, q, page_order, true); -} - -/* General conversion function from gfn to mfn */ -static inline mfn_t __nonnull(3) get_gfn_type( - struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) -{ - p2m_access_t a; - return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL); -} - -/* Syntactic sugar: most callers will use one of these. */ -#define get_gfn(d, g, t) get_gfn_type((d), (g), (t), P2M_ALLOC) -#define get_gfn_query(d, g, t) get_gfn_type((d), (g), (t), 0) -#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \ - P2M_ALLOC | P2M_UNSHARE) - -/* Will release the p2m_lock for this gfn entry. */ -void __put_gfn(struct p2m_domain *p2m, unsigned long gfn); - -#define put_gfn(d, gfn) __put_gfn(p2m_get_hostp2m((d)), (gfn)) - -/* The intent of the "unlocked" accessor is to have the caller not worry about - * put_gfn. They apply to very specific situations: debug printk's, dumps - * during a domain crash, or to peek at a p2m entry/type. Caller is not - * holding the p2m entry exclusively during or after calling this. - * - * This is also used in the shadow code whenever the paging lock is - * held -- in those cases, the caller is protected against concurrent - * p2m updates by the fact that write_p2m_entry() also takes - * the paging lock. - * - * Note that an unlocked accessor only makes sense for a "query" lookup. - * Any other type of query can cause a change in the p2m and may need to - * perform locking. 
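A usage sketch (not part of the patch; d and gfn are assumed caller inputs) of how the locked accessors pair up — the MFN is only stable between the two calls:

    {
        p2m_type_t t;
        mfn_t mfn = get_gfn(d, gfn, &t);   /* locks the p2m entry */

        if ( p2m_is_ram(t) && mfn_valid(mfn) )
        {
            /* ... use mfn; the translation cannot change under our feet ... */
        }

        put_gfn(d, gfn);                   /* releases the entry lock */
    }

After put_gfn() the translation may change again, which is why the unlocked variant described above is only suitable for query-style peeks.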
- */ -static inline mfn_t get_gfn_query_unlocked(struct domain *d, - unsigned long gfn, - p2m_type_t *t) -{ - p2m_access_t a; - return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0); -} - -/* Atomically look up a GFN and take a reference count on the backing page. - * This makes sure the page doesn't get freed (or shared) underfoot, - * and should be used by any path that intends to write to the backing page. - * Returns NULL if the page is not backed by RAM. - * The caller is responsible for calling put_page() afterwards. */ -struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn, - p2m_type_t *t, p2m_access_t *a, - p2m_query_t q); - -static inline struct page_info *get_page_from_gfn( - struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) -{ - struct page_info *page; - - if ( paging_mode_translate(d) ) - return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q); - - /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */ - if ( t ) - *t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct; - page = mfn_to_page(_mfn(gfn)); - return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL; -} - -/* General conversion function from mfn to gfn */ -static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn) -{ - if ( paging_mode_translate(d) ) - return _gfn(get_gpfn_from_mfn(mfn_x(mfn))); - else - return _gfn(mfn_x(mfn)); -} - -#ifdef CONFIG_HVM -#define AP2MGET_prepopulate true -#define AP2MGET_query false - -/* - * Looks up altp2m entry. If the entry is not found it looks up the entry in - * hostp2m. - * The prepopulate param is used to set the found entry in altp2m. - */ -int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn, - p2m_type_t *t, p2m_access_t *a, - bool prepopulate); -#endif - -/* Init the datastructures for later use by the p2m code */ -int p2m_init(struct domain *d); - -/* Allocate a new p2m table for a domain. - * - * Returns 0 for success or -errno. */ -int p2m_alloc_table(struct p2m_domain *p2m); - -/* Return all the p2m resources to Xen. */ -void p2m_teardown(struct p2m_domain *p2m); -void p2m_final_teardown(struct domain *d); - -/* Add a page to a domain's p2m table */ -int guest_physmap_add_entry(struct domain *d, gfn_t gfn, - mfn_t mfn, unsigned int page_order, - p2m_type_t t); - -/* Untyped version for RAM only, for compatibility and PV. */ -int __must_check guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn, - unsigned int page_order); - -/* Set a p2m range as populate-on-demand */ -int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, - unsigned int order); - -#ifdef CONFIG_HVM - -/* Enable hardware-assisted log-dirty. */ -void p2m_enable_hardware_log_dirty(struct domain *d); - -/* Disable hardware-assisted log-dirty */ -void p2m_disable_hardware_log_dirty(struct domain *d); - -/* Flush hardware cached dirty GFNs */ -void p2m_flush_hardware_cached_dirty(struct domain *d); - -#else - -static inline void p2m_flush_hardware_cached_dirty(struct domain *d) {} - -#endif - -/* Change types across all p2m entries in a domain */ -void p2m_change_entry_type_global(struct domain *d, - p2m_type_t ot, p2m_type_t nt); - -/* Change types across a range of p2m entries (start ... 
end-1) */ -void p2m_change_type_range(struct domain *d, - unsigned long start, unsigned long end, - p2m_type_t ot, p2m_type_t nt); - -/* Compare-exchange the type of a single p2m entry */ -int p2m_change_type_one(struct domain *d, unsigned long gfn, - p2m_type_t ot, p2m_type_t nt); - -/* Synchronously change the p2m type for a range of gfns */ -int p2m_finish_type_change(struct domain *d, - gfn_t first_gfn, - unsigned long max_nr); - -int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start, - unsigned long end); - -/* Set mmio addresses in the p2m table (for pass-through) */ -int set_mmio_p2m_entry(struct domain *d, gfn_t gfn, mfn_t mfn, - unsigned int order); - -/* Set identity addresses in the p2m table (for pass-through) */ -int set_identity_p2m_entry(struct domain *d, unsigned long gfn, - p2m_access_t p2ma, unsigned int flag); -int clear_identity_p2m_entry(struct domain *d, unsigned long gfn); - -/* - * Populate-on-demand - */ - -/* Dump PoD information about the domain */ -void p2m_pod_dump_data(struct domain *d); - -#ifdef CONFIG_HVM - -/* Report a change affecting memory types. */ -void p2m_memory_type_changed(struct domain *d); - -/* Called by p2m code when demand-populating a PoD page */ -bool -p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order); - -/* Move all pages from the populate-on-demand cache to the domain page_list - * (usually in preparation for domain destruction) */ -int p2m_pod_empty_cache(struct domain *d); - -/* Set populate-on-demand cache size so that the total memory allocated to a - * domain matches target */ -int p2m_pod_set_mem_target(struct domain *d, unsigned long target); - -/* Scan pod cache when offline/broken page triggered */ -int -p2m_pod_offline_or_broken_hit(struct page_info *p); - -/* Replace pod cache when offline/broken page triggered */ -void -p2m_pod_offline_or_broken_replace(struct page_info *p); - -static inline long p2m_pod_entry_count(const struct p2m_domain *p2m) -{ - return p2m->pod.entry_count; -} - -void p2m_pod_init(struct p2m_domain *p2m); - -#else - -static inline bool -p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order) -{ - return false; -} - -static inline int p2m_pod_empty_cache(struct domain *d) -{ - return 0; -} - -static inline int p2m_pod_offline_or_broken_hit(struct page_info *p) -{ - return 0; -} - -static inline void p2m_pod_offline_or_broken_replace(struct page_info *p) -{ - ASSERT_UNREACHABLE(); -} - -static inline long p2m_pod_entry_count(const struct p2m_domain *p2m) -{ - return 0; -} - -static inline void p2m_pod_init(struct p2m_domain *p2m) {} - -#endif - - -/* - * Paging to disk and page-sharing - */ - -/* Modify p2m table for shared gfn */ -int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn); - -/* Tell xenpaging to drop a paged out frame */ -void p2m_mem_paging_drop_page(struct domain *d, gfn_t gfn, p2m_type_t p2mt); -/* Start populating a paged out frame */ -void p2m_mem_paging_populate(struct domain *d, gfn_t gfn); -/* Resume normal operation (in case a domain was paused) */ -struct vm_event_st; -void p2m_mem_paging_resume(struct domain *d, struct vm_event_st *rsp); - -/* - * Internal functions, only called by other p2m code - */ - -mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level); -void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg); - -/* Directly set a p2m entry: only for use by p2m code. 
Does not need
- * a call to put_gfn afterwards. */
-int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
-                               unsigned int page_order, p2m_type_t p2mt,
-                               p2m_access_t p2ma);
-
-#if defined(CONFIG_HVM)
-/* Set up function pointers for PT implementation: only for use by p2m code */
-extern void p2m_pt_init(struct p2m_domain *p2m);
-#else
-static inline void p2m_pt_init(struct p2m_domain *p2m) {}
-#endif
-
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_query_t q, uint32_t *pfec);
-
-#if P2M_AUDIT
-extern void audit_p2m(struct domain *d,
-                      uint64_t *orphans,
-                      uint64_t *m2p_bad,
-                      uint64_t *p2m_bad);
-#endif /* P2M_AUDIT */
-
-/* Printouts */
-#define P2M_PRINTK(f, a...)                                \
-    debugtrace_printk("p2m: %s(): " f, __func__, ##a)
-#define P2M_ERROR(f, a...)                                 \
-    printk(XENLOG_G_ERR "pg error: %s(): " f, __func__, ##a)
-#if P2M_DEBUGGING
-#define P2M_DEBUG(f, a...)                                 \
-    debugtrace_printk("p2mdebug: %s(): " f, __func__, ##a)
-#else
-#define P2M_DEBUG(f, a...) do { (void)(f); } while(0)
-#endif
-
-/*
- * Functions specific to the p2m-pt implementation
- */
-
-/* Extract the type from the PTE flags that store it */
-static inline p2m_type_t p2m_flags_to_type(unsigned int flags)
-{
-    /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
-     * to make sure that an entirely empty PTE doesn't have RAM type */
-    if ( flags == 0 )
-        return p2m_invalid;
-    /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
-     * 59-62 for iommu flags so we can't use them to store p2m type info. */
-    return (flags >> 12) & 0x7f;
-}
-
-static inline p2m_type_t p2m_recalc_type_range(bool recalc, p2m_type_t t,
-                                               struct p2m_domain *p2m,
-                                               unsigned long gfn_start,
-                                               unsigned long gfn_end)
-{
-    if ( !recalc || !p2m_is_changeable(t) )
-        return t;
-
-    if ( t == p2m_ioreq_server && p2m->ioreq.server != NULL )
-        return t;
-
-    return p2m_is_logdirty_range(p2m, gfn_start, gfn_end) ? p2m_ram_logdirty
-                                                          : p2m_ram_rw;
-}
-
-static inline p2m_type_t p2m_recalc_type(bool recalc, p2m_type_t t,
-                                         struct p2m_domain *p2m,
-                                         unsigned long gfn)
-{
-    return p2m_recalc_type_range(recalc, t, p2m, gfn, gfn);
-}
-
-int p2m_pt_handle_deferred_changes(uint64_t gpa);
-
-/*
- * Nested p2m: shadow p2m tables used for nested HVM virtualization
- */
-
-/* Flushes specified p2m table */
-void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
-/* Flushes all nested p2m tables */
-void p2m_flush_nestedp2m(struct domain *d);
-/* Flushes the np2m specified by np2m_base (if it exists) */
-void np2m_flush_base(struct vcpu *v, unsigned long np2m_base);
-
-void hap_p2m_init(struct p2m_domain *p2m);
-void shadow_p2m_init(struct p2m_domain *p2m);
-
-void nestedp2m_write_p2m_entry_post(struct p2m_domain *p2m,
-                                    unsigned int oflags);
-
-/*
- * Alternate p2m: shadow p2m tables used for alternate memory views
- */
-#ifdef CONFIG_HVM
-/* get current alternate p2m table */
-static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
-{
-    unsigned int index = vcpu_altp2m(v).p2midx;
-
-    if ( index == INVALID_ALTP2M )
-        return NULL;
-
-    BUG_ON(index >= MAX_ALTP2M);
-
-    return v->domain->arch.altp2m_p2m[index];
-}
-
-/* Switch alternate p2m for a single vcpu */
-bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
-
-/* Check to see if vcpu should be switched to a different p2m.
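A worked example (not part of the patch; the flags value is constructed by hand) of the encoding that p2m_flags_to_type() above extracts — the type occupies flag bits 12-18:

    unsigned int flags = p2m_ram_rw << 12;          /* hand-built encoding */

    ASSERT(p2m_flags_to_type(flags) == p2m_ram_rw);
    ASSERT(p2m_flags_to_type(0) == p2m_invalid);    /* empty PTE special case */

The early return for flags == 0 is what keeps an entirely empty PTE from reading back as type 0, which is plain RAM on AMD IOMMUs.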
*/ -void p2m_altp2m_check(struct vcpu *v, uint16_t idx); - -/* Flush all the alternate p2m's for a domain */ -void p2m_flush_altp2m(struct domain *d); - -/* Alternate p2m paging */ -bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l, - mfn_t *mfn, p2m_type_t *p2mt, - p2m_access_t *p2ma, unsigned int page_order); - -/* Make a specific alternate p2m valid */ -int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx); - -/* Find an available alternate p2m and make it valid */ -int p2m_init_next_altp2m(struct domain *d, uint16_t *idx, - xenmem_access_t hvmmem_default_access); - -/* Make a specific alternate p2m invalid */ -int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx); - -/* Switch alternate p2m for entire domain */ -int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx); - -/* Change a gfn->mfn mapping */ -int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx, - gfn_t old_gfn, gfn_t new_gfn); - -/* Propagate a host p2m change to all alternate p2m's */ -int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, - mfn_t mfn, unsigned int page_order, - p2m_type_t p2mt, p2m_access_t p2ma); - -/* Set a specific p2m view visibility */ -int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx, - uint8_t visible); -#else -struct p2m_domain *p2m_get_altp2m(struct vcpu *v); -static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {} -#endif - -/* p2m access to IOMMU flags */ -static inline unsigned int p2m_access_to_iommu_flags(p2m_access_t p2ma) -{ - switch ( p2ma ) - { - case p2m_access_rw: - case p2m_access_rwx: - return IOMMUF_readable | IOMMUF_writable; - - case p2m_access_r: - case p2m_access_rx: - case p2m_access_rx2rw: - return IOMMUF_readable; - - case p2m_access_w: - case p2m_access_wx: - return IOMMUF_writable; - - case p2m_access_n: - case p2m_access_x: - case p2m_access_n2rwx: - return 0; - } - - ASSERT_UNREACHABLE(); - return 0; -} - -/* - * p2m type to IOMMU flags - */ -static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, - p2m_access_t p2ma, mfn_t mfn) -{ - unsigned int flags; - - switch( p2mt ) - { - case p2m_ram_rw: - case p2m_grant_map_rw: - case p2m_ram_logdirty: - case p2m_map_foreign: - flags = IOMMUF_readable | IOMMUF_writable; - break; - case p2m_ram_ro: - case p2m_grant_map_ro: - flags = IOMMUF_readable; - break; - case p2m_mmio_direct: - flags = p2m_access_to_iommu_flags(p2ma); - if ( (flags & IOMMUF_writable) && - rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) ) - flags &= ~IOMMUF_writable; - break; - default: - flags = 0; - break; - } - - return flags; -} - -int p2m_set_ioreq_server(struct domain *d, unsigned int flags, - struct ioreq_server *s); -struct ioreq_server *p2m_get_ioreq_server(struct domain *d, - unsigned int *flags); - -static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt, - p2m_type_t ot, mfn_t nfn, mfn_t ofn, - unsigned int level) -{ - BUG_ON(!level); - BUG_ON(level > 1 && (nt == p2m_ioreq_server || nt == p2m_map_foreign)); - - if ( level != 1 || (nt == ot && mfn_eq(nfn, ofn)) ) - return 0; - - switch ( nt ) - { - case p2m_ioreq_server: - /* - * p2m_ioreq_server is only used for 4K pages, so - * the count is only done for level 1 entries. 
- */ - p2m->ioreq.entry_count++; - break; - - case p2m_map_foreign: - if ( !mfn_valid(nfn) ) - { - ASSERT_UNREACHABLE(); - return -EINVAL; - } - - if ( !page_get_owner_and_reference(mfn_to_page(nfn)) ) - return -EBUSY; - - break; - - default: - break; - } - - switch ( ot ) - { - case p2m_ioreq_server: - ASSERT(p2m->ioreq.entry_count > 0); - p2m->ioreq.entry_count--; - break; - - case p2m_map_foreign: - if ( !mfn_valid(ofn) ) - { - ASSERT_UNREACHABLE(); - return -EINVAL; - } - put_page(mfn_to_page(ofn)); - break; - - default: - break; - } - - return 0; -} - -#endif /* _XEN_ASM_X86_P2M_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/page-bits.h b/xen/include/asm-x86/page-bits.h deleted file mode 100644 index 6f7fc7d035..0000000000 --- a/xen/include/asm-x86/page-bits.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef __X86_PAGE_SHIFT_H__ -#define __X86_PAGE_SHIFT_H__ - -#define L1_PAGETABLE_SHIFT 12 -#define L2_PAGETABLE_SHIFT 21 -#define L3_PAGETABLE_SHIFT 30 -#define L4_PAGETABLE_SHIFT 39 -#define PAGE_SHIFT L1_PAGETABLE_SHIFT -#define SUPERPAGE_SHIFT L2_PAGETABLE_SHIFT -#define ROOT_PAGETABLE_SHIFT L4_PAGETABLE_SHIFT - -#define PAGETABLE_ORDER 9 -#define L1_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) -#define L2_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) -#define L3_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) -#define L4_PAGETABLE_ENTRIES (1 << PAGETABLE_ORDER) -#define ROOT_PAGETABLE_ENTRIES L4_PAGETABLE_ENTRIES - -#define SUPERPAGE_ORDER PAGETABLE_ORDER -#define SUPERPAGE_PAGES (1 << SUPERPAGE_ORDER) - -/* These are architectural limits. */ -#define PADDR_BITS 52 -#define VADDR_BITS 48 - -#endif /* __X86_PAGE_SHIFT_H__ */ diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h deleted file mode 100644 index 1d080cffbe..0000000000 --- a/xen/include/asm-x86/page.h +++ /dev/null @@ -1,409 +0,0 @@ -#ifndef __X86_PAGE_H__ -#define __X86_PAGE_H__ - -#include -#include - -#define PAGE_ORDER_4K 0 -#define PAGE_ORDER_2M 9 -#define PAGE_ORDER_1G 18 - -#ifndef __ASSEMBLY__ -# include -# include -#endif - -#include - -/* Read a pte atomically from memory. */ -#define l1e_read_atomic(l1ep) \ - l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep)))) -#define l2e_read_atomic(l2ep) \ - l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep)))) -#define l3e_read_atomic(l3ep) \ - l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep)))) -#define l4e_read_atomic(l4ep) \ - l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep)))) - -/* Write a pte atomically to memory. */ -#define l1e_write_atomic(l1ep, l1e) \ - pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e)) -#define l2e_write_atomic(l2ep, l2e) \ - pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e)) -#define l3e_write_atomic(l3ep, l3e) \ - pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e)) -#define l4e_write_atomic(l4ep, l4e) \ - pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e)) - -/* - * Write a pte safely but non-atomically to memory. - * The PTE may become temporarily not-present during the update. 
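To put numbers on the geometry defined above (editor's worked example): each level resolves PAGETABLE_ORDER = 9 bits on top of the 12-bit page offset, so an L1 entry maps 2^12 = 4 KiB, an L2 entry 2^21 = 2 MiB (one superpage of SUPERPAGE_PAGES = 512 pages), an L3 entry 2^30 = 1 GiB, and the 512-entry root table spans 2^(39+9) = 2^48 bytes, matching VADDR_BITS.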
- */ -#define l1e_write(l1ep, l1e) \ - pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e)) -#define l2e_write(l2ep, l2e) \ - pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e)) -#define l3e_write(l3ep, l3e) \ - pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e)) -#define l4e_write(l4ep, l4e) \ - pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e)) - -/* Get direct integer representation of a pte's contents (intpte_t). */ -#define l1e_get_intpte(x) ((x).l1) -#define l2e_get_intpte(x) ((x).l2) -#define l3e_get_intpte(x) ((x).l3) -#define l4e_get_intpte(x) ((x).l4) - -/* Get pfn mapped by pte (unsigned long). */ -#define l1e_get_pfn(x) \ - ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) -#define l2e_get_pfn(x) \ - ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) -#define l3e_get_pfn(x) \ - ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) -#define l4e_get_pfn(x) \ - ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)) - -/* Get mfn mapped by pte (mfn_t). */ -#define l1e_get_mfn(x) _mfn(l1e_get_pfn(x)) -#define l2e_get_mfn(x) _mfn(l2e_get_pfn(x)) -#define l3e_get_mfn(x) _mfn(l3e_get_pfn(x)) -#define l4e_get_mfn(x) _mfn(l4e_get_pfn(x)) - -/* Get physical address of page mapped by pte (paddr_t). */ -#define l1e_get_paddr(x) \ - ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK)))) -#define l2e_get_paddr(x) \ - ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK)))) -#define l3e_get_paddr(x) \ - ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK)))) -#define l4e_get_paddr(x) \ - ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK)))) - -/* Get pointer to info structure of page mapped by pte (struct page_info *). */ -#define l1e_get_page(x) mfn_to_page(l1e_get_mfn(x)) -#define l2e_get_page(x) mfn_to_page(l2e_get_mfn(x)) -#define l3e_get_page(x) mfn_to_page(l3e_get_mfn(x)) -#define l4e_get_page(x) mfn_to_page(l4e_get_mfn(x)) - -/* Get pte access flags (unsigned int). */ -#define l1e_get_flags(x) (get_pte_flags((x).l1)) -#define l2e_get_flags(x) (get_pte_flags((x).l2)) -#define l3e_get_flags(x) (get_pte_flags((x).l3)) -#define l4e_get_flags(x) (get_pte_flags((x).l4)) - -/* Get pte pkeys (unsigned int). */ -#define l1e_get_pkey(x) get_pte_pkey((x).l1) -#define l2e_get_pkey(x) get_pte_pkey((x).l2) -#define l3e_get_pkey(x) get_pte_pkey((x).l3) - -/* Construct an empty pte. */ -#define l1e_empty() ((l1_pgentry_t) { 0 }) -#define l2e_empty() ((l2_pgentry_t) { 0 }) -#define l3e_empty() ((l3_pgentry_t) { 0 }) -#define l4e_empty() ((l4_pgentry_t) { 0 }) - -/* Construct a pte from a pfn and access flags. */ -#define l1e_from_pfn(pfn, flags) \ - ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) -#define l2e_from_pfn(pfn, flags) \ - ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) -#define l3e_from_pfn(pfn, flags) \ - ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) -#define l4e_from_pfn(pfn, flags) \ - ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) }) - -/* Construct a pte from an mfn and access flags. */ -#define l1e_from_mfn(m, f) l1e_from_pfn(mfn_x(m), f) -#define l2e_from_mfn(m, f) l2e_from_pfn(mfn_x(m), f) -#define l3e_from_mfn(m, f) l3e_from_pfn(mfn_x(m), f) -#define l4e_from_mfn(m, f) l4e_from_pfn(mfn_x(m), f) - -/* Construct a pte from a physical address and access flags. 
*/ -#ifndef __ASSEMBLY__ -static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags) -{ - ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); - return (l1_pgentry_t) { pa | put_pte_flags(flags) }; -} -static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags) -{ - ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); - return (l2_pgentry_t) { pa | put_pte_flags(flags) }; -} -static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags) -{ - ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); - return (l3_pgentry_t) { pa | put_pte_flags(flags) }; -} -static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags) -{ - ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0); - return (l4_pgentry_t) { pa | put_pte_flags(flags) }; -} -#endif /* !__ASSEMBLY__ */ - -/* Construct a pte from its direct integer representation. */ -#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) }) -#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) }) -#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) }) -#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) }) - -/* Construct a pte from a page pointer and access flags. */ -#define l1e_from_page(page, flags) l1e_from_mfn(page_to_mfn(page), flags) -#define l2e_from_page(page, flags) l2e_from_mfn(page_to_mfn(page), flags) -#define l3e_from_page(page, flags) l3e_from_mfn(page_to_mfn(page), flags) -#define l4e_from_page(page, flags) l4e_from_mfn(page_to_mfn(page), flags) - -/* Add extra flags to an existing pte. */ -#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags)) -#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags)) -#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags)) -#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags)) - -/* Remove flags from an existing pte. */ -#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags)) -#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags)) -#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags)) -#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags)) - -/* Flip flags in an existing L1 PTE. */ -#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags)) - -/* Check if a pte's page mapping or significant access flags have changed. */ -#define l1e_has_changed(x,y,flags) \ - ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) -#define l2e_has_changed(x,y,flags) \ - ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) -#define l3e_has_changed(x,y,flags) \ - ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) -#define l4e_has_changed(x,y,flags) \ - ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) ) - -#define map_l1t_from_l2e(x) (l1_pgentry_t *)map_domain_page(l2e_get_mfn(x)) -#define map_l2t_from_l3e(x) (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x)) -#define map_l3t_from_l4e(x) (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x)) - -/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. 
*/ -#define l1e_from_l2e(l2e_, offset_) ({ \ - const l1_pgentry_t *l1t_ = map_l1t_from_l2e(l2e_); \ - l1_pgentry_t l1e_ = l1t_[offset_]; \ - unmap_domain_page(l1t_); \ - l1e_; }) - -#define l2e_from_l3e(l3e_, offset_) ({ \ - const l2_pgentry_t *l2t_ = map_l2t_from_l3e(l3e_); \ - l2_pgentry_t l2e_ = l2t_[offset_]; \ - unmap_domain_page(l2t_); \ - l2e_; }) - -#define l3e_from_l4e(l4e_, offset_) ({ \ - const l3_pgentry_t *l3t_ = map_l3t_from_l4e(l4e_); \ - l3_pgentry_t l3e_ = l3t_[offset_]; \ - unmap_domain_page(l3t_); \ - l3e_; }) - -/* Given a virtual address, get an entry offset into a page table. */ -#define l1_table_offset(a) \ - (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) -#define l2_table_offset(a) \ - (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) -#define l3_table_offset(a) \ - (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) -#define l4_table_offset(a) \ - (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) - -/* Convert a pointer to a page-table entry into pagetable slot index. */ -#define pgentry_ptr_to_slot(_p) \ - (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p))) - -#ifndef __ASSEMBLY__ - -/* Page-table type. */ -typedef struct { u64 pfn; } pagetable_t; -#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT) -#define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x)) -#define pagetable_get_pfn(x) ((x).pfn) -#define pagetable_get_mfn(x) _mfn(((x).pfn)) -#define pagetable_is_null(x) ((x).pfn == 0) -#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) }) -#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) }) -#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg)) -#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT) -#define pagetable_null() pagetable_from_pfn(0) - -void clear_page_sse2(void *); -void copy_page_sse2(void *, const void *); - -#define clear_page(_p) clear_page_sse2(_p) -#define copy_page(_t, _f) copy_page_sse2(_t, _f) - -/* Convert between Xen-heap virtual addresses and machine addresses. */ -#define __pa(x) (virt_to_maddr(x)) -#define __va(x) (maddr_to_virt(x)) - -/* Convert between Xen-heap virtual addresses and machine frame numbers. */ -#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT) -#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT)) - -/* Convert between machine frame numbers and page-info structures. */ -#define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn)) -#define page_to_mfn(pg) pdx_to_mfn((unsigned long)((pg) - frame_table)) - -/* Convert between machine addresses and page-info structures. */ -#define __maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma)) -#define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg)) - -/* Convert between frame number and address formats. */ -#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT) -#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT)) -#define gfn_to_gaddr(gfn) __pfn_to_paddr(gfn_x(gfn)) -#define gaddr_to_gfn(ga) _gfn(__paddr_to_pfn(ga)) -#define mfn_to_maddr(mfn) __pfn_to_paddr(mfn_x(mfn)) -#define maddr_to_mfn(ma) _mfn(__paddr_to_pfn(ma)) - -/* - * We define non-underscored wrappers for above conversion functions. These are - * overridden in various source files while underscored versions remain intact. 
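A minimal sketch (not part of the patch; the function name is hypothetical, and a real walk must also check _PAGE_PRESENT and _PAGE_PSE at each level) of a four-level lookup built from the helpers above:

    static l1_pgentry_t example_walk(const l4_pgentry_t *l4t, unsigned long va)
    {
        l4_pgentry_t l4e = l4t[l4_table_offset(va)];
        l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
        l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));

        return l1e_from_l2e(l2e, l1_table_offset(va));
    }

Each lXe_from_lYe() maps the next table with map_domain_page(), copies one entry, and unmaps it again, which is what makes the walk safe without the direct map.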
- */
-#define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
-#define virt_to_mfn(va)     __virt_to_mfn(va)
-#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
-#define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
-#define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
-#define maddr_to_page(ma)   __maddr_to_page(ma)
-#define page_to_maddr(pg)   __page_to_maddr(pg)
-#define virt_to_page(va)    __virt_to_page(va)
-#define page_to_virt(pg)    __page_to_virt(pg)
-#define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
-#define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
-#define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     xen_map_to_mfn((unsigned long)(va))
-#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
-
-#endif /* !defined(__ASSEMBLY__) */
-
-/* Where to find each level of the linear mapping */
-#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
-#define __linear_l2_table \
- ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
-#define __linear_l3_table \
- ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
-#define __linear_l4_table \
- ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))
-
-
-#ifndef __ASSEMBLY__
-extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
-extern l2_pgentry_t  *compat_idle_pg_table_l2;
-extern unsigned int   m2p_compat_vstart;
-extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
-    l2_bootmap[4*L2_PAGETABLE_ENTRIES];
-extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
-extern l2_pgentry_t l2_directmap[4*L2_PAGETABLE_ENTRIES];
-extern l1_pgentry_t l1_fixmap[L1_PAGETABLE_ENTRIES];
-void paging_init(void);
-void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
-#endif /* !defined(__ASSEMBLY__) */
-
-#define _PAGE_NONE     _AC(0x000,U)
-#define _PAGE_PRESENT  _AC(0x001,U)
-#define _PAGE_RW       _AC(0x002,U)
-#define _PAGE_USER     _AC(0x004,U)
-#define _PAGE_PWT      _AC(0x008,U)
-#define _PAGE_PCD      _AC(0x010,U)
-#define _PAGE_ACCESSED _AC(0x020,U)
-#define _PAGE_DIRTY    _AC(0x040,U)
-#define _PAGE_PAT      _AC(0x080,U)
-#define _PAGE_PSE      _AC(0x080,U)
-#define _PAGE_GLOBAL   _AC(0x100,U)
-#define _PAGE_AVAIL0   _AC(0x200,U)
-#define _PAGE_AVAIL1   _AC(0x400,U)
-#define _PAGE_AVAIL2   _AC(0x800,U)
-#define _PAGE_AVAIL    _AC(0xE00,U)
-#define _PAGE_PSE_PAT  _AC(0x1000,U)
-#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
-
-#ifndef __ASSEMBLY__
-/* Dependency on NX being available can't be expressed. */
-#define _PAGE_NX       (cpu_has_nx ? _PAGE_NX_BIT : 0)
-#endif
-
-#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
-
-/*
- * Debug option: Ensure that granted mappings are not implicitly unmapped.
- * WARNING: This will need to be disabled to run OSes that use the spare PTE
- * bits themselves (e.g., *BSD).
- */
-#ifdef NDEBUG
-#undef _PAGE_GNTTAB
-#endif
-#ifndef _PAGE_GNTTAB
-#define _PAGE_GNTTAB   0
-#endif
-
-#define __PAGE_HYPERVISOR_RO      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RO | \
-                                   _PAGE_DIRTY | _PAGE_RW)
-#define __PAGE_HYPERVISOR_RX      (_PAGE_PRESENT | _PAGE_ACCESSED)
-#define __PAGE_HYPERVISOR         (__PAGE_HYPERVISOR_RX | \
-                                   _PAGE_DIRTY | _PAGE_RW)
-#define __PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR | _PAGE_PCD)
-#define __PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_HYPERVISOR_SHSTK   (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY)
-
-#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpage mappings */
-
-#ifndef __ASSEMBLY__
-
-/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr.
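For orientation (editor's worked arithmetic): __PAGE_HYPERVISOR_RW expands to _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_RW | _PAGE_NX, i.e. 0x63 plus the NX bit, and since _PAGE_NX evaluates to 0 at run time on CPUs without NX support, the same constant is usable everywhere.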
*/ -static inline unsigned int pte_flags_to_cacheattr(unsigned int flags) -{ - return ((flags >> 5) & 4) | ((flags >> 3) & 3); -} -static inline unsigned int cacheattr_to_pte_flags(unsigned int cacheattr) -{ - return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3); -} - -/* return true if permission increased */ -static inline bool_t -perms_strictly_increased(uint32_t old_flags, uint32_t new_flags) -/* Given the flags of two entries, are the new flags a strict - * increase in rights over the old ones? */ -{ - uint32_t of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT); - uint32_t nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT); - /* Flip the NX bit, since it's the only one that decreases rights; - * we calculate as if it were an "X" bit. */ - of ^= _PAGE_NX_BIT; - nf ^= _PAGE_NX_BIT; - /* If the changed bits are all set in the new flags, then rights strictly - * increased between old and new. */ - return ((of | (of ^ nf)) == nf); -} - -static inline void invalidate_icache(void) -{ -/* - * There is nothing to be done here as icaches are sufficiently - * coherent on x86. - */ -} - -#endif /* !__ASSEMBLY__ */ - -#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) - -#endif /* __X86_PAGE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h deleted file mode 100644 index 308f1115dd..0000000000 --- a/xen/include/asm-x86/paging.h +++ /dev/null @@ -1,433 +0,0 @@ -/****************************************************************************** - * include/asm-x86/paging.h - * - * Common interface for paging support - * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) - * Parts of this code are Copyright (c) 2006 by XenSource Inc. - * Parts of this code are Copyright (c) 2006 by Michael A Fetterman - * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . 
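The packing used by pte_flags_to_cacheattr() in the page.h hunk above puts PWT in bit 0, PCD in bit 1 and PAT in bit 2 of the 3-bit value (PTE flag bits 3, 4 and 7, hence the >> 3 and >> 5). A couple of worked cases, not part of the patch:

    ASSERT(pte_flags_to_cacheattr(_PAGE_PCD | _PAGE_PWT) == 3);   /* UC  */
    ASSERT(pte_flags_to_cacheattr(_PAGE_PCD) == 2);               /* UC- */
    ASSERT(cacheattr_to_pte_flags(3) == (_PAGE_PCD | _PAGE_PWT));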
- */ - -#ifndef _XEN_PAGING_H -#define _XEN_PAGING_H - -#include -#include -#include -#include -#include -#include -#include - -/***************************************************************************** - * Macros to tell which paging mode a domain is in */ - -#define PG_SH_shift 20 -#define PG_HAP_shift 21 -#define PG_SHF_shift 22 -/* We're in one of the shadow modes */ -#ifdef CONFIG_SHADOW_PAGING -#define PG_SH_enable (1U << PG_SH_shift) -#define PG_SH_forced (1U << PG_SHF_shift) -#else -#define PG_SH_enable 0 -#define PG_SH_forced 0 -#endif -#ifdef CONFIG_HVM -#define PG_HAP_enable (1U << PG_HAP_shift) -#else -#define PG_HAP_enable 0 -#endif - -/* common paging mode bits */ -#define PG_mode_shift 10 -#ifdef CONFIG_HVM -/* Refcounts based on shadow tables instead of guest tables */ -#define PG_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << PG_mode_shift) -/* Xen does p2m translation, not guest */ -#define PG_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << PG_mode_shift) -/* Xen does not steal address space from the domain for its own booking; - * requires VT or similar mechanisms */ -#define PG_external (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << PG_mode_shift) -#else -#define PG_refcounts 0 -#define PG_translate 0 -#define PG_external 0 -#endif -#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE) -/* Enable log dirty mode */ -#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift) -#else -#define PG_log_dirty 0 -#endif - -/* All paging modes. */ -#define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external) - -#define paging_mode_enabled(_d) (!!(_d)->arch.paging.mode) -#define paging_mode_shadow(_d) (!!((_d)->arch.paging.mode & PG_SH_enable)) -#define paging_mode_sh_forced(_d) (!!((_d)->arch.paging.mode & PG_SH_forced)) -#define paging_mode_hap(_d) (!!((_d)->arch.paging.mode & PG_HAP_enable)) - -#define paging_mode_refcounts(_d) (!!((_d)->arch.paging.mode & PG_refcounts)) -#define paging_mode_log_dirty(_d) (!!((_d)->arch.paging.mode & PG_log_dirty)) -#define paging_mode_translate(_d) (!!((_d)->arch.paging.mode & PG_translate)) -#define paging_mode_external(_d) (!!((_d)->arch.paging.mode & PG_external)) - -/* flags used for paging debug */ -#define PAGING_DEBUG_LOGDIRTY 0 - -/***************************************************************************** - * Mode-specific entry points into the shadow code. - * - * These shouldn't be used directly by callers; rather use the functions - * below which will indirect through this table as appropriate. 
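A small usage sketch (not part of the patch; the pairing shown is the editor's simplification): the paging_mode_*() predicates are independent bit tests on d->arch.paging.mode and are meant to be combined, e.g.:

    if ( paging_mode_log_dirty(d) && paging_mode_hap(d) )
        /* HAP can use hardware dirty tracking, cf. p2m.h above. */
        p2m_enable_hardware_log_dirty(d);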
*/ - -struct shadow_paging_mode { -#ifdef CONFIG_SHADOW_PAGING - void (*detach_old_tables )(struct vcpu *v); -#ifdef CONFIG_PV - void (*write_guest_entry )(struct vcpu *v, intpte_t *p, - intpte_t new, mfn_t gmfn); - intpte_t (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p, - intpte_t old, intpte_t new, - mfn_t gmfn); -#endif -#ifdef CONFIG_HVM - int (*guess_wrmap )(struct vcpu *v, - unsigned long vaddr, mfn_t gmfn); - void (*pagetable_dying )(paddr_t gpa); - void (*trace_emul_write_val )(const void *ptr, unsigned long vaddr, - const void *src, unsigned int bytes); -#endif -#endif - /* For outsiders to tell what mode we're in */ - unsigned int shadow_levels; -}; - - -/************************************************/ -/* common paging interface */ -/************************************************/ -struct paging_mode { - int (*page_fault )(struct vcpu *v, unsigned long va, - struct cpu_user_regs *regs); - bool (*invlpg )(struct vcpu *v, - unsigned long linear); -#ifdef CONFIG_HVM - unsigned long (*gva_to_gfn )(struct vcpu *v, - struct p2m_domain *p2m, - unsigned long va, - uint32_t *pfec); - unsigned long (*p2m_ga_to_gfn )(struct vcpu *v, - struct p2m_domain *p2m, - unsigned long cr3, - paddr_t ga, uint32_t *pfec, - unsigned int *page_order); -#endif - void (*update_cr3 )(struct vcpu *v, int do_locking, - bool noflush); - void (*update_paging_modes )(struct vcpu *v); - bool (*flush_tlb )(const unsigned long *vcpu_bitmap); - - unsigned int guest_levels; - - /* paging support extension */ - struct shadow_paging_mode shadow; -}; - -/***************************************************************************** - * Log dirty code */ - -#if PG_log_dirty - -/* get the dirty bitmap for a specific range of pfns */ -void paging_log_dirty_range(struct domain *d, - unsigned long begin_pfn, - unsigned long nr, - uint8_t *dirty_bitmap); - -/* enable log dirty */ -int paging_log_dirty_enable(struct domain *d, bool log_global); - -/* log dirty initialization */ -void paging_log_dirty_init(struct domain *d, const struct log_dirty_ops *ops); - -/* mark a page as dirty */ -void paging_mark_dirty(struct domain *d, mfn_t gmfn); -/* mark a page as dirty with taking guest pfn as parameter */ -void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn); - -/* is this guest page dirty? - * This is called from inside paging code, with the paging lock held. */ -int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn); - -/* - * Log-dirty radix tree indexing: - * All tree nodes are PAGE_SIZE bytes, mapped on-demand. - * Leaf nodes are simple bitmaps; 1 bit per guest pfn. - * Interior nodes are arrays of LOGDIRTY_NODE_ENTRIES mfns. - * TODO: Dynamic radix tree height. Most guests will only need 2 levels. - * The fourth level is basically unusable on 32-bit Xen. - * TODO2: Abstract out the radix-tree mechanics? 
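In concrete numbers (editor's worked example for the index macros that follow): a leaf bitmap holds PAGE_SIZE * 8 = 2^15 bits, so L1_LOGDIRTY_IDX() keeps the low 15 bits of the pfn; each interior level then consumes PAGETABLE_ORDER = 9 further bits, so a 2-level tree already covers 2^24 pfns (64 GiB of guest memory) and a full 4-level tree 2^42 pfns — which is why the TODO above notes that most guests only need 2 levels.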
- */ -#define LOGDIRTY_NODE_ENTRIES (1 << PAGETABLE_ORDER) -#define L1_LOGDIRTY_IDX(pfn) (pfn_x(pfn) & ((1 << (PAGE_SHIFT + 3)) - 1)) -#define L2_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3)) & \ - (LOGDIRTY_NODE_ENTRIES-1)) -#define L3_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER)) & \ - (LOGDIRTY_NODE_ENTRIES-1)) -#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \ - (LOGDIRTY_NODE_ENTRIES-1)) - -#ifdef CONFIG_HVM -/* VRAM dirty tracking support */ -struct sh_dirty_vram { - unsigned long begin_pfn; - unsigned long end_pfn; -#ifdef CONFIG_SHADOW_PAGING - paddr_t *sl1ma; - uint8_t *dirty_bitmap; - s_time_t last_dirty; -#endif -}; -#endif - -#else /* !PG_log_dirty */ - -static inline void paging_log_dirty_init(struct domain *d, - const struct log_dirty_ops *ops) {} -static inline void paging_mark_dirty(struct domain *d, mfn_t gmfn) {} -static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) {} -static inline bool paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { return false; } - -#endif /* PG_log_dirty */ - -/***************************************************************************** - * Entry points into the paging-assistance code */ - -/* Initialize the paging resource for vcpu struct. It is called by - * vcpu_initialise() in domain.c */ -void paging_vcpu_init(struct vcpu *v); - -/* Set up the paging-assistance-specific parts of a domain struct at - * start of day. Called for every domain from arch_domain_create() */ -int paging_domain_init(struct domain *d); - -/* Handler for paging-control ops: operations from user-space to enable - * and disable ephemeral shadow modes (test mode and log-dirty mode) and - * manipulate the log-dirty bitmap. */ -int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl, - bool_t resuming); - -/* Helper hypercall for dealing with continuations. */ -long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); - -/* Call when destroying a vcpu/domain */ -void paging_vcpu_teardown(struct vcpu *v); -int paging_teardown(struct domain *d); - -/* Call once all of the references to the domain have gone away */ -void paging_final_teardown(struct domain *d); - -/* Enable an arbitrary paging-assistance mode. Call once at domain - * creation. */ -int paging_enable(struct domain *d, u32 mode); - -#define paging_get_hostmode(v) ((v)->arch.paging.mode) -#define paging_get_nestedmode(v) ((v)->arch.paging.nestedmode) -const struct paging_mode *paging_get_mode(struct vcpu *v); -void paging_update_nestedmode(struct vcpu *v); - -/* Page fault handler - * Called from pagefault handler in Xen, and from the HVM trap handlers - * for pagefaults. Returns 1 if this fault was an artefact of the - * paging code (and the guest should retry) or 0 if it is not (and the - * fault should be handled elsewhere or passed to the guest). - * - * Note: under shadow paging, this function handles all page faults; - * however, for hardware-assisted paging, this function handles only - * host page faults (i.e. nested page faults). */ -static inline int -paging_fault(unsigned long va, struct cpu_user_regs *regs) -{ - struct vcpu *v = current; - return paging_get_hostmode(v)->page_fault(v, va, regs); -} - -/* Handle invlpg requests on vcpus. */ -void paging_invlpg(struct vcpu *v, unsigned long va); - -/* - * Translate a guest virtual address to the frame number that the - * *guest* pagetables would map it to. 
Returns INVALID_GFN if the guest
- * tables don't map this address for this kind of access.
- * *pfec is used to determine which kind of access this is when
- * walking the tables.  The caller should set the PFEC_page_present bit
- * in *pfec; in the failure case, that bit will be cleared if appropriate.
- *
- * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
- * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
- */
-unsigned long paging_gva_to_gfn(struct vcpu *v,
-                                unsigned long va,
-                                uint32_t *pfec);
-
-#ifdef CONFIG_HVM
-
-/* Translate a guest address using a particular CR3 value.  This is used
- * by nested HAP code, to walk the guest-supplied NPT tables as if
- * they were pagetables.
- * Use 'paddr_t' for the guest address so it won't overflow when
- * l1 or l2 guest is in 32-bit PAE mode.
- * If the GFN returned is not INVALID_GFN, *page_order gives
- * the size of the superpage (if any) it was found in. */
-static inline unsigned long paging_ga_to_gfn_cr3(struct vcpu *v,
-                                                 unsigned long cr3,
-                                                 paddr_t ga,
-                                                 uint32_t *pfec,
-                                                 unsigned int *page_order)
-{
-    struct p2m_domain *p2m = v->domain->arch.p2m;
-    return paging_get_hostmode(v)->p2m_ga_to_gfn(v, p2m, cr3, ga, pfec,
-                                                 page_order);
-}
-
-#endif /* CONFIG_HVM */
-
-/* Update all the things that are derived from the guest's CR3.
- * Called when the guest changes CR3; the caller can then use v->arch.cr3
- * as the value to load into the host CR3 to schedule this vcpu */
-static inline void paging_update_cr3(struct vcpu *v, bool noflush)
-{
-    paging_get_hostmode(v)->update_cr3(v, 1, noflush);
-}
-
-/* Update all the things that are derived from the guest's CR0/CR3/CR4.
- * Called to initialize paging structures if the paging mode
- * has changed, and when bringing up a VCPU for the first time. */
-static inline void paging_update_paging_modes(struct vcpu *v)
-{
-    paging_get_hostmode(v)->update_paging_modes(v);
-}
-
-#ifdef CONFIG_PV
-
-/*
- * Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns false if we page-faulted,
- * true for success.
- */
-static inline void paging_write_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
-    if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new, gmfn);
-    else
-#endif
-        write_atomic(p, new);
-}
-
-
-/*
- * Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns false if we page-faulted,
- * true if not.  N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful.
- */
-static inline intpte_t paging_cmpxchg_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
-    if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
-                                                                  new, gmfn);
-#endif
-    return cmpxchg(p, old, new);
-}
-
-#endif /* CONFIG_PV */
-
-/* Helper function that writes a pte in such a way that a concurrent read
- * never sees a half-written entry that has _PAGE_PRESENT set */
-static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
-{
-    *p = new;
-}
-
-/*
- * Called from the guest to indicate that a process is being
- * torn down and its pagetables will soon be discarded.
- */
-void pagetable_dying(paddr_t gpa);
-
-/* Print paging-assistance info to the console */
-void paging_dump_domain_info(struct domain *d);
-void paging_dump_vcpu_info(struct vcpu *v);
-
-/* Set the pool of shadow pages to the required number of pages.
- * Input might be rounded up to a minimum number of pages, plus
- * space for the p2m table.
- * Returns 0 for success, non-zero for failure. */
-int paging_set_allocation(struct domain *d, unsigned int pages,
-                          bool *preempted);
-
-/* Is gfn within maxphysaddr for the domain? */
-static inline bool gfn_valid(const struct domain *d, gfn_t gfn)
-{
-    return !(gfn_x(gfn) >> (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT));
-}
-
-/* Maxphysaddr supportable by the paging infrastructure. */
-static always_inline unsigned int paging_max_paddr_bits(const struct domain *d)
-{
-    unsigned int bits = paging_mode_hap(d) ? hap_paddr_bits : paddr_bits;
-
-    if ( paging_mode_external(d) )
-    {
-        if ( !IS_ENABLED(CONFIG_BIGMEM) && paging_mode_shadow(d) )
-        {
-            /* Shadowed superpages store GFNs in 32-bit page_info fields. */
-            bits = min(bits, 32U + PAGE_SHIFT);
-        }
-        else
-        {
-            /* Both p2m-ept and p2m-pt only support 4-level page tables. */
-            bits = min(bits, 48U);
-        }
-    }
-
-    return bits;
-}
-
-/* Flush selected vCPUs' TLBs.  NULL for all. */
-static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
-{
-    return paging_get_hostmode(current)->flush_tlb(vcpu_bitmap);
-}
-
-#endif /* _XEN_PAGING_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-x86/pci.h b/xen/include/asm-x86/pci.h
deleted file mode 100644
index 443f25347d..0000000000
--- a/xen/include/asm-x86/pci.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __X86_PCI_H__
-#define __X86_PCI_H__
-
-#define CF8_BDF(cf8)     (  ((cf8) & 0x00ffff00) >> 8)
-#define CF8_ADDR_LO(cf8) (   (cf8) & 0x000000fc)
-#define CF8_ADDR_HI(cf8) (  ((cf8) & 0x0f000000) >> 16)
-#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
-
-#define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 \
-                        || id == 0x01268086 || id == 0x01028086 \
-                        || id == 0x01128086 || id == 0x01228086 \
-                        || id == 0x010A8086 )
-
-struct arch_pci_dev {
-    vmask_t used_vectors;
-};
-
-int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
-                             unsigned int reg, unsigned int size,
-                             uint32_t *data);
-int pci_msi_conf_write_intercept(struct pci_dev *, unsigned int reg,
-                                 unsigned int size, uint32_t *data);
-bool_t pci_mmcfg_decode(unsigned long mfn, unsigned int *seg,
-                        unsigned int *bdf);
-
-bool_t pci_ro_mmcfg_decode(unsigned long mfn, unsigned int *seg,
-                           unsigned int *bdf);
-
-/* MMCFG external variable defines */
-extern int pci_mmcfg_config_num;
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-
-/* Unlike ARM, PCI passthrough is always enabled for x86.
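A worked decode of the CF8 helpers above (not part of the patch): for the config-space address 0x80001008 — enable bit set, bus 0, device 2, function 0, register 0x08:

    ASSERT(CF8_ENABLED(0x80001008));
    ASSERT(CF8_BDF(0x80001008) == 0x0010);    /* bus 0, dev 2, fn 0 */
    ASSERT(CF8_ADDR_LO(0x80001008) == 0x08);  /* dword-aligned register */

CF8_ADDR_HI() only contributes when the extended register bits 24-27 are used.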
*/ -static always_inline bool is_pci_passthrough_enabled(void) -{ - return true; -} - -static inline void arch_pci_init_pdev(struct pci_dev *pdev) {} - -#endif /* __X86_PCI_H__ */ diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h deleted file mode 100644 index 2b0c29a233..0000000000 --- a/xen/include/asm-x86/percpu.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __X86_PERCPU_H__ -#define __X86_PERCPU_H__ - -#ifndef __ASSEMBLY__ -extern char __per_cpu_start[], __per_cpu_data_end[]; -extern unsigned long __per_cpu_offset[NR_CPUS]; -void percpu_init_areas(void); -#endif - -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) \ - (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) -#define this_cpu(var) \ - (*RELOC_HIDE(&per_cpu__##var, get_cpu_info()->per_cpu_offset)) - -#define this_cpu_ptr(var) \ - (*RELOC_HIDE(var, get_cpu_info()->per_cpu_offset)) - -#define per_cpu_ptr(var, cpu) \ - (*RELOC_HIDE(var, __per_cpu_offset[cpu])) - -#endif /* __X86_PERCPU_H__ */ diff --git a/xen/include/asm-x86/perfc.h b/xen/include/asm-x86/perfc.h deleted file mode 100644 index a1a591e803..0000000000 --- a/xen/include/asm-x86/perfc.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_PERFC_H__ -#define __ASM_PERFC_H__ - -static inline void arch_perfc_reset(void) -{ -} - -static inline void arch_perfc_gather(void) -{ -} - -#endif diff --git a/xen/include/asm-x86/perfc_defn.h b/xen/include/asm-x86/perfc_defn.h deleted file mode 100644 index 1a9ea3f89e..0000000000 --- a/xen/include/asm-x86/perfc_defn.h +++ /dev/null @@ -1,120 +0,0 @@ -/* This file is legitimately included multiple times. */ -/*#ifndef __XEN_PERFC_DEFN_H__*/ -/*#define __XEN_PERFC_DEFN_H__*/ - -PERFCOUNTER_ARRAY(exceptions, "exceptions", 32) - -#define VMX_PERF_EXIT_REASON_SIZE 56 -#define VMX_PERF_VECTOR_SIZE 0x20 -PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE) -PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE) - -#define VMEXIT_NPF_PERFC 141 -#define SVM_PERF_EXIT_REASON_SIZE (1+141) -PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE) - -PERFCOUNTER(seg_fixups, "segmentation fixups") - -PERFCOUNTER(apic_timer, "apic timer interrupts") - -PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes") - -PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op") -PERFCOUNTER(num_mmuext_ops, "mmuext ops") -PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update") -PERFCOUNTER(num_page_updates, "page updates") -PERFCOUNTER(writable_mmu_updates, "mmu_updates of writable pages") -PERFCOUNTER(calls_to_update_va, "calls to update_va_map") -PERFCOUNTER(page_faults, "page faults") -PERFCOUNTER(copy_user_faults, "copy_user faults") - -PERFCOUNTER(map_domain_page_count, "map_domain_page count") -PERFCOUNTER(ptwr_emulations, "writable pt emulations") -PERFCOUNTER(mmio_ro_emulations, "mmio ro emulations") - -PERFCOUNTER(exception_fixed, "pre-exception fixed") - -PERFCOUNTER(guest_walk, "guest pagetable walks") - -/* Shadow counters */ -PERFCOUNTER(shadow_alloc, "calls to shadow_alloc") -PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs") - -/* STATUS counters do not reset when 'P' is hit */ -PERFSTATUS(shadow_alloc_count, "number of shadow pages in use") -PERFCOUNTER(shadow_free, "calls to shadow_free") -PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows") -PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows") -PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map") -PERFCOUNTER(shadow_a_update, "shadow A bit 
update") -PERFCOUNTER(shadow_ad_update, "shadow A&D bit update") -PERFCOUNTER(shadow_fault, "calls to shadow_fault") -PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p") -PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio") -PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error") -PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn") -PERFCOUNTER(shadow_fault_bail_real_fault, - "shadow_fault really guest fault") -PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read") -PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write") -PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails") -PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write") -PERFCOUNTER(shadow_fault_emulate_wp, "shadow_fault emulate for CR0.WP=0") -PERFCOUNTER(shadow_fault_fast_emulate, "shadow_fault fast emulate") -PERFCOUNTER(shadow_fault_fast_emulate_fail, - "shadow_fault fast emulate failed") -PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio") -PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault") -PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate") -PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e") -PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e") -PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e") -PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e") -PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup") -PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head") -PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses") -PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status") -PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert") -PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete") -PERFCOUNTER(shadow_writeable, "shadow removes write access") -PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3") -PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3") -PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3") -PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: linux low/solaris") -PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: linux high") -PERFCOUNTER(shadow_writeable_h_6, "shadow writeable: FreeBSD") -PERFCOUNTER(shadow_writeable_h_7, "shadow writeable: sl1p") -PERFCOUNTER(shadow_writeable_h_8, "shadow writeable: sl1p failed") -PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force") -PERFCOUNTER(shadow_writeable_bf_1, "shadow writeable resync bf") -PERFCOUNTER(shadow_mappings, "shadow removes all mappings") -PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force") -PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit") -PERFCOUNTER(shadow_unshadow, "shadow unshadows a page") -PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer") -PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force") -PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed") -PERFCOUNTER(shadow_check_gwalk, "shadow checks gwalk") -PERFCOUNTER(shadow_inconsistent_gwalk, "shadow check inconsistent gwalk") -PERFCOUNTER(shadow_rm_write_flush_tlb, - "shadow flush tlb by removing write perm") - -PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg") -PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults") - -PERFCOUNTER(shadow_em_ex_pt, "shadow extra pt write") -PERFCOUNTER(shadow_em_ex_non_pt, "shadow extra non-pt-write op") -PERFCOUNTER(shadow_em_ex_fail, "shadow extra 
emulation failed") - -PERFCOUNTER(shadow_oos_fixup_add, "shadow OOS fixup adds") -PERFCOUNTER(shadow_oos_fixup_evict,"shadow OOS fixup evictions") -PERFCOUNTER(shadow_unsync, "shadow OOS unsyncs") -PERFCOUNTER(shadow_unsync_evict, "shadow OOS evictions") -PERFCOUNTER(shadow_resync, "shadow OOS resyncs") - -PERFCOUNTER(realmode_emulations, "realmode instructions emulated") -PERFCOUNTER(realmode_exits, "vmexits from realmode") - -PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection") - -/*#endif*/ /* __XEN_PERFC_DEFN_H__ */ diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h deleted file mode 100644 index 400b4fac5e..0000000000 --- a/xen/include/asm-x86/processor.h +++ /dev/null @@ -1,650 +0,0 @@ - -/* Portions are: Copyright (c) 1994 Linus Torvalds */ - -#ifndef __ASM_X86_PROCESSOR_H -#define __ASM_X86_PROCESSOR_H - -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#include -#include -#include -#endif - -#include -#include - -/* - * Trap/fault mnemonics. - */ -#define TRAP_divide_error 0 -#define TRAP_debug 1 -#define TRAP_nmi 2 -#define TRAP_int3 3 -#define TRAP_overflow 4 -#define TRAP_bounds 5 -#define TRAP_invalid_op 6 -#define TRAP_no_device 7 -#define TRAP_double_fault 8 -#define TRAP_copro_seg 9 -#define TRAP_invalid_tss 10 -#define TRAP_no_segment 11 -#define TRAP_stack_error 12 -#define TRAP_gp_fault 13 -#define TRAP_page_fault 14 -#define TRAP_spurious_int 15 -#define TRAP_copro_error 16 -#define TRAP_alignment_check 17 -#define TRAP_machine_check 18 -#define TRAP_simd_error 19 -#define TRAP_virtualisation 20 -#define TRAP_nr 32 - -#define TRAP_HAVE_EC X86_EXC_HAVE_EC - -/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */ -/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */ -#define TRAP_syscall 256 - -/* Boolean return code: the reason for a fault has been fixed. */ -#define EXCRET_fault_fixed 1 - -/* 'trap_bounce' flags values */ -#define TBF_EXCEPTION 1 -#define TBF_EXCEPTION_ERRCODE 2 -#define TBF_INTERRUPT 8 - -/* 'arch_vcpu' flags values */ -#define _TF_kernel_mode 0 -#define TF_kernel_mode (1<<_TF_kernel_mode) - -/* #PF error code values. */ -#define PFEC_page_present (_AC(1,U) << 0) -#define PFEC_write_access (_AC(1,U) << 1) -#define PFEC_user_mode (_AC(1,U) << 2) -#define PFEC_reserved_bit (_AC(1,U) << 3) -#define PFEC_insn_fetch (_AC(1,U) << 4) -#define PFEC_prot_key (_AC(1,U) << 5) -#define PFEC_shstk (_AC(1,U) << 6) -#define PFEC_arch_mask (_AC(0xffff,U)) /* Architectural PFEC values. */ -/* Internally used only flags. */ -#define PFEC_page_paged (1U<<16) -#define PFEC_page_shared (1U<<17) -#define PFEC_implicit (1U<<18) /* Pagewalk input for ldt/gdt/idt/tr accesses. */ -#define PFEC_synth_mask (~PFEC_arch_mask) /* Synthetic PFEC values. */ - -/* Other exception error code values. */ -#define X86_XEC_EXT (_AC(1,U) << 0) -#define X86_XEC_IDT (_AC(1,U) << 1) -#define X86_XEC_TI (_AC(1,U) << 2) - -#define XEN_MINIMAL_CR4 (X86_CR4_PGE | X86_CR4_PAE) - -#define XEN_CR4_PV32_BITS (X86_CR4_SMEP|X86_CR4_SMAP) - -/* Common SYSCALL parameters. */ -#define XEN_MSR_STAR (((uint64_t)FLAT_RING3_CS32 << 48) | \ - ((uint64_t)__HYPERVISOR_CS << 32)) -#define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF| \ - X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF| \ - X86_EFLAGS_TF) - -/* - * Host IA32_CR_PAT value to cover all memory types. This is not the default - * MSR_PAT value, and is an ABI with PV guests. 
- */ -#define XEN_MSR_PAT _AC(0x050100070406, ULL) - -#ifndef __ASSEMBLY__ - -struct domain; -struct vcpu; - -struct x86_cpu_id { - uint16_t vendor; - uint16_t family; - uint16_t model; - uint16_t feature; /* bit index */ - const void *driver_data; -}; - -struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - __u32 extended_cpuid_level; /* Maximum supported CPUID extended level */ - unsigned int x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB - valid for CPUS which support this call */ - int x86_cache_alignment; /* In bytes */ - __u32 x86_max_cores; /* cpuid returned max cores value */ - __u32 booted_cores; /* number of cores as seen by OS */ - __u32 x86_num_siblings; /* cpuid logical cpus per chip value */ - __u32 apicid; - __u32 phys_proc_id; /* package ID of each logical CPU */ - __u32 cpu_core_id; /* core ID of each logical CPU*/ - __u32 compute_unit_id; /* AMD compute unit ID of each logical CPU */ - unsigned short x86_clflush_size; -} __cacheline_aligned; - -/* - * capabilities of CPUs - */ - -extern struct cpuinfo_x86 boot_cpu_data; - -extern struct cpuinfo_x86 cpu_data[]; -#define current_cpu_data cpu_data[smp_processor_id()] - -extern bool probe_cpuid_faulting(void); -extern void ctxt_switch_levelling(const struct vcpu *next); -extern void (*ctxt_switch_masking)(const struct vcpu *next); - -extern bool_t opt_cpu_info; -extern u32 trampoline_efer; -extern u64 trampoline_misc_enable_off; - -/* Maximum width of physical addresses supported by the hardware. */ -extern unsigned int paddr_bits; -/* Max physical address width supported within HAP guests. */ -extern unsigned int hap_paddr_bits; -/* Maximum width of virtual addresses supported by the hardware. */ -extern unsigned int vaddr_bits; - -extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]); - -extern void identify_cpu(struct cpuinfo_x86 *); -extern void setup_clear_cpu_cap(unsigned int); -extern void setup_force_cpu_cap(unsigned int); -extern bool is_forced_cpu_cap(unsigned int); -extern void print_cpu_info(unsigned int cpu); -extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); - -#define cpu_to_core(_cpu) (cpu_data[_cpu].cpu_core_id) -#define cpu_to_socket(_cpu) (cpu_data[_cpu].phys_proc_id) - -unsigned int apicid_to_socket(unsigned int); - -static inline int cpu_nr_siblings(unsigned int cpu) -{ - return cpu_data[cpu].x86_num_siblings; -} - -/* - * Generic CPUID function - * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx - * resulting in stale register contents being returned. 
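
Zeroing %ecx in the plain cpuid() macro does double duty: besides avoiding the stale-register quirk noted above, it pins subleaf 0 for ECX-indexed leaves. When a non-zero subleaf is wanted, cpuid_count() (defined just below) must be used instead. A brief usage sketch, relying on that helper; bit 20 of CPUID.(7,0):EBX is the architectural SMAP flag:

    /* Illustrative only: leaf 7 is ECX-indexed, so the subleaf has to be
     * chosen explicitly via cpuid_count(). */
    static bool cpu_reports_smap(void)
    {
        unsigned int eax, ebx, ecx, edx;

        cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);

        return ebx & (1u << 20);    /* CPUID.(7,0):EBX.SMAP */
    }
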
- */ -#define cpuid(_op,_eax,_ebx,_ecx,_edx) \ - asm volatile ( "cpuid" \ - : "=a" (*(int *)(_eax)), \ - "=b" (*(int *)(_ebx)), \ - "=c" (*(int *)(_ecx)), \ - "=d" (*(int *)(_edx)) \ - : "0" (_op), "2" (0) ) - -/* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count( - unsigned int op, - unsigned int count, - unsigned int *eax, - unsigned int *ebx, - unsigned int *ecx, - unsigned int *edx) -{ - asm volatile ( "cpuid" - : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) - : "0" (op), "c" (count) ); -} - -/* - * CPUID functions returning a single datum - */ -static always_inline unsigned int cpuid_eax(unsigned int op) -{ - unsigned int eax; - - asm volatile ( "cpuid" - : "=a" (eax) - : "0" (op) - : "bx", "cx", "dx" ); - return eax; -} - -static always_inline unsigned int cpuid_ebx(unsigned int op) -{ - unsigned int eax, ebx; - - asm volatile ( "cpuid" - : "=a" (eax), "=b" (ebx) - : "0" (op) - : "cx", "dx" ); - return ebx; -} - -static always_inline unsigned int cpuid_ecx(unsigned int op) -{ - unsigned int eax, ecx; - - asm volatile ( "cpuid" - : "=a" (eax), "=c" (ecx) - : "0" (op) - : "bx", "dx" ); - return ecx; -} - -static always_inline unsigned int cpuid_edx(unsigned int op) -{ - unsigned int eax, edx; - - asm volatile ( "cpuid" - : "=a" (eax), "=d" (edx) - : "0" (op) - : "bx", "cx" ); - return edx; -} - -static always_inline unsigned int cpuid_count_ebx( - unsigned int leaf, unsigned int subleaf) -{ - unsigned int ebx, tmp; - - cpuid_count(leaf, subleaf, &tmp, &ebx, &tmp, &tmp); - - return ebx; -} - -static always_inline unsigned int cpuid_count_edx( - unsigned int leaf, unsigned int subleaf) -{ - unsigned int edx, tmp; - - cpuid_count(leaf, subleaf, &tmp, &tmp, &tmp, &edx); - - return edx; -} - -static inline unsigned long read_cr0(void) -{ - unsigned long cr0; - asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) ); - return cr0; -} - -static inline void write_cr0(unsigned long val) -{ - asm volatile ( "mov %0,%%cr0" : : "r" ((unsigned long)val) ); -} - -static inline unsigned long read_cr2(void) -{ - unsigned long cr2; - asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) ); - return cr2; -} - -static inline void write_cr3(unsigned long val) -{ - asm volatile ( "mov %0, %%cr3" : : "r" (val) : "memory" ); -} - -static inline unsigned long cr3_pa(unsigned long cr3) -{ - return cr3 & X86_CR3_ADDR_MASK; -} - -static inline unsigned int cr3_pcid(unsigned long cr3) -{ - return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0; -} - -static inline unsigned long read_cr4(void) -{ - return get_cpu_info()->cr4; -} - -static inline void write_cr4(unsigned long val) -{ - struct cpu_info *info = get_cpu_info(); - -#ifdef CONFIG_PV - /* No global pages in case of PCIDs enabled! */ - ASSERT(!(val & X86_CR4_PGE) || !(val & X86_CR4_PCIDE)); -#else - ASSERT(!(val & X86_CR4_PCIDE)); -#endif - - /* - * On hardware supporting FSGSBASE, the value in %cr4 is the kernel's - * choice for 64bit PV guests, which impacts whether Xen can use the - * instructions. - * - * The {rd,wr}{fs,gs}base() helpers use info->cr4 to work out whether it - * is safe to execute the {RD,WR}{FS,GS}BASE instruction, falling back to - * the MSR path if not. Some users require interrupt safety. - * - * If FSGSBASE is currently or about to become clear, reflect this in - * info->cr4 before updating %cr4, so an interrupt which hits in the - * middle won't observe FSGSBASE set in info->cr4 but clear in %cr4. 
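
The cr3_pa()/cr3_pcid() helpers above split a %cr3 image into its architectural fields: a page-aligned address, a 12-bit PCID (when CR4.PCIDE is set), and the write-only NOFLUSH hint in bit 63. A standalone sketch of that decomposition; the masks here are the architectural ones (address field bits 51:12), which is a close approximation of Xen's X86_CR3_ADDR_MASK:

    /* Illustrative only: split a PCID-style %cr3 image into its fields. */
    #include <stdint.h>

    #define CR3_NOFLUSH   (1ULL << 63)
    #define CR3_PCID_MASK 0xfffULL                        /* bits 11:0  */
    #define CR3_ADDR_MASK (((1ULL << 52) - 1) & ~0xfffULL) /* bits 51:12 */

    static uint64_t cr3_address(uint64_t cr3)    { return cr3 & CR3_ADDR_MASK; }
    static unsigned int cr3_pcid_field(uint64_t cr3) { return cr3 & CR3_PCID_MASK; }
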
- */ - info->cr4 = val & (info->cr4 | ~X86_CR4_FSGSBASE); - - asm volatile ( "mov %[val], %%cr4" - : "+m" (info->cr4) /* Force ordering without a barrier. */ - : [val] "r" (val) ); - - info->cr4 = val; -} - -/* Clear and set 'TS' bit respectively */ -static inline void clts(void) -{ - asm volatile ( "clts" ); -} - -static inline void stts(void) -{ - write_cr0(X86_CR0_TS|read_cr0()); -} - -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; - -static always_inline void set_in_cr4 (unsigned long mask) -{ - mmu_cr4_features |= mask; - write_cr4(read_cr4() | mask); -} - -static inline unsigned int rdpkru(void) -{ - unsigned int pkru; - - asm volatile (".byte 0x0f,0x01,0xee" - : "=a" (pkru) : "c" (0) : "dx"); - - return pkru; -} - -static inline void wrpkru(unsigned int pkru) -{ - asm volatile ( ".byte 0x0f, 0x01, 0xef" - :: "a" (pkru), "d" (0), "c" (0) ); -} - -/* Macros for PKRU domain */ -#define PKRU_READ (0) -#define PKRU_WRITE (1) -#define PKRU_ATTRS (2) - -/* - * PKRU defines 32 bits, there are 16 domains and 2 attribute bits per - * domain in pkru, pkeys is index to a defined domain, so the value of - * pte_pkeys * PKRU_ATTRS + R/W is offset of a defined domain attribute. - */ -static inline bool_t read_pkru_ad(uint32_t pkru, unsigned int pkey) -{ - ASSERT(pkey < 16); - return (pkru >> (pkey * PKRU_ATTRS + PKRU_READ)) & 1; -} - -static inline bool_t read_pkru_wd(uint32_t pkru, unsigned int pkey) -{ - ASSERT(pkey < 16); - return (pkru >> (pkey * PKRU_ATTRS + PKRU_WRITE)) & 1; -} - -static always_inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax,%ecx,%edx;" */ - asm volatile ( - ".byte 0x0f,0x01,0xc8;" - : : "a" (eax), "c" (ecx), "d"(edx) ); -} - -static always_inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax,%ecx;" */ - asm volatile ( - ".byte 0x0f,0x01,0xc9;" - : : "a" (eax), "c" (ecx) ); -} - -#define IOBMP_BYTES 8192 -#define IOBMP_INVALID_OFFSET 0x8000 - -struct __packed tss64 { - uint32_t :32; - uint64_t rsp0, rsp1, rsp2; - uint64_t :64; - /* - * Interrupt Stack Table is 1-based so tss->ist[0] corresponds to an IST - * value of 1 in an Interrupt Descriptor. - */ - uint64_t ist[7]; - uint64_t :64; - uint16_t :16, bitmap; -}; -struct tss_page { - uint64_t __aligned(PAGE_SIZE) ist_ssp[8]; - struct tss64 tss; -}; -DECLARE_PER_CPU(struct tss_page, tss_page); - -#define IST_NONE 0UL -#define IST_MCE 1UL -#define IST_NMI 2UL -#define IST_DB 3UL -#define IST_DF 4UL -#define IST_MAX 4UL - -/* Set the Interrupt Stack Table used by a particular IDT entry. */ -static inline void set_ist(idt_entry_t *idt, unsigned int ist) -{ - /* IST is a 3 bit field, 32 bits into the IDT entry. */ - ASSERT(ist <= IST_MAX); - - /* Typically used on a live idt. Disuade any clever optimisations. 
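
The PKRU comment above describes a packed layout: 16 protection keys, two bits each, with the access-disable bit at the even position and the write-disable bit at the odd position of each pair. A self-contained restatement of the read_pkru_ad()/read_pkru_wd() extraction:

    /* Illustrative only: extract the AD/WD bits for protection key 'pkey'
     * from a PKRU image, mirroring read_pkru_ad()/read_pkru_wd() above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PKRU_READ  0
    #define PKRU_WRITE 1
    #define PKRU_ATTRS 2

    static bool pkey_access_disabled(uint32_t pkru, unsigned int pkey)
    {
        return (pkru >> (pkey * PKRU_ATTRS + PKRU_READ)) & 1;
    }

    static bool pkey_write_disabled(uint32_t pkru, unsigned int pkey)
    {
        return (pkru >> (pkey * PKRU_ATTRS + PKRU_WRITE)) & 1;
    }
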
*/ - ACCESS_ONCE(idt->ist) = ist; -} - -static inline void enable_each_ist(idt_entry_t *idt) -{ - set_ist(&idt[TRAP_double_fault], IST_DF); - set_ist(&idt[TRAP_nmi], IST_NMI); - set_ist(&idt[TRAP_machine_check], IST_MCE); - set_ist(&idt[TRAP_debug], IST_DB); -} - -static inline void disable_each_ist(idt_entry_t *idt) -{ - set_ist(&idt[TRAP_double_fault], IST_NONE); - set_ist(&idt[TRAP_nmi], IST_NONE); - set_ist(&idt[TRAP_machine_check], IST_NONE); - set_ist(&idt[TRAP_debug], IST_NONE); -} - -#define IDT_ENTRIES 256 -extern idt_entry_t idt_table[]; -extern idt_entry_t *idt_tables[]; - -DECLARE_PER_CPU(root_pgentry_t *, root_pgt); - -extern void write_ptbase(struct vcpu *v); - -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -static always_inline void rep_nop(void) -{ - asm volatile ( "rep;nop" : : : "memory" ); -} - -#define cpu_relax() rep_nop() - -void show_code(const struct cpu_user_regs *regs); -void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs); -void show_registers(const struct cpu_user_regs *regs); -void show_execution_state(const struct cpu_user_regs *regs); -#define dump_execution_state() run_in_exception_handler(show_execution_state) -void show_page_walk(unsigned long addr); -void noreturn fatal_trap(const struct cpu_user_regs *regs, bool_t show_remote); - -extern void mtrr_ap_init(void); -extern void mtrr_bp_init(void); - -void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp); - -/* Dispatch table for exceptions */ -extern void (* const exception_table[TRAP_nr])(struct cpu_user_regs *regs); - -#define DECLARE_TRAP_HANDLER(_name) \ - void _name(void); \ - void do_ ## _name(struct cpu_user_regs *regs) -#define DECLARE_TRAP_HANDLER_CONST(_name) \ - void _name(void); \ - void do_ ## _name(const struct cpu_user_regs *regs) - -DECLARE_TRAP_HANDLER(divide_error); -DECLARE_TRAP_HANDLER(debug); -DECLARE_TRAP_HANDLER_CONST(nmi); -DECLARE_TRAP_HANDLER(int3); -DECLARE_TRAP_HANDLER(overflow); -DECLARE_TRAP_HANDLER(bounds); -DECLARE_TRAP_HANDLER(invalid_op); -DECLARE_TRAP_HANDLER(device_not_available); -DECLARE_TRAP_HANDLER(double_fault); -DECLARE_TRAP_HANDLER(invalid_TSS); -DECLARE_TRAP_HANDLER(segment_not_present); -DECLARE_TRAP_HANDLER(stack_segment); -DECLARE_TRAP_HANDLER(general_protection); -DECLARE_TRAP_HANDLER(page_fault); -DECLARE_TRAP_HANDLER(early_page_fault); -DECLARE_TRAP_HANDLER(coprocessor_error); -DECLARE_TRAP_HANDLER(simd_coprocessor_error); -DECLARE_TRAP_HANDLER_CONST(machine_check); -DECLARE_TRAP_HANDLER(alignment_check); -DECLARE_TRAP_HANDLER(entry_CP); - -DECLARE_TRAP_HANDLER(entry_int82); - -#undef DECLARE_TRAP_HANDLER_CONST -#undef DECLARE_TRAP_HANDLER - -void trap_nop(void); - -static inline void enable_nmis(void) -{ - unsigned long tmp; - - asm volatile ( "mov %%rsp, %[rsp] \n\t" - "lea .Ldone(%%rip), %[rip] \n\t" -#ifdef CONFIG_XEN_SHSTK - /* Check for CET-SS being active. 
*/ - "mov $1, %k[ssp] \n\t" - "rdsspq %[ssp] \n\t" - "cmp $1, %k[ssp] \n\t" - "je .Lshstk_done \n\t" - - /* Push 3 words on the shadow stack */ - ".rept 3 \n\t" - "call 1f; nop; 1: \n\t" - ".endr \n\t" - - /* Fixup to be an IRET shadow stack frame */ - "wrssq %q[cs], -1*8(%[ssp]) \n\t" - "wrssq %[rip], -2*8(%[ssp]) \n\t" - "wrssq %[ssp], -3*8(%[ssp]) \n\t" - - ".Lshstk_done:" -#endif - /* Write an IRET regular frame */ - "push %[ss] \n\t" - "push %[rsp] \n\t" - "pushf \n\t" - "push %q[cs] \n\t" - "push %[rip] \n\t" - "iretq \n\t" - ".Ldone: \n\t" - : [rip] "=&r" (tmp), - [rsp] "=&r" (tmp), - [ssp] "=&r" (tmp) - : [ss] "i" (__HYPERVISOR_DS), - [cs] "r" (__HYPERVISOR_CS) ); -} - -void sysenter_entry(void); -void sysenter_eflags_saved(void); -void int80_direct_trap(void); - -struct stubs { - union { - void(*func)(void); - unsigned long addr; - }; - unsigned long mfn; -}; - -DECLARE_PER_CPU(struct stubs, stubs); -unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn); - -void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf, - uint32_t subleaf, struct cpuid_leaf *res); -int guest_rdmsr_xen(const struct vcpu *v, uint32_t idx, uint64_t *val); -int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val); - -static inline uint8_t get_cpu_family(uint32_t raw, uint8_t *model, - uint8_t *stepping) -{ - uint8_t fam = (raw >> 8) & 0xf; - - if ( fam == 0xf ) - fam += (raw >> 20) & 0xff; - - if ( model ) - { - uint8_t mod = (raw >> 4) & 0xf; - - if ( fam >= 0x6 ) - mod |= (raw >> 12) & 0xf0; - - *model = mod; - } - if ( stepping ) - *stepping = raw & 0xf; - return fam; -} - -extern int8_t opt_tsx, cpu_has_tsx_ctrl; -extern bool rtm_disabled; -void tsx_init(void); - -enum ap_boot_method { - AP_BOOT_NORMAL, - AP_BOOT_SKINIT, -}; -extern enum ap_boot_method ap_boot_method; - -#endif /* !__ASSEMBLY__ */ - -#endif /* __ASM_X86_PROCESSOR_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/psr.h b/xen/include/asm-x86/psr.h deleted file mode 100644 index c2257da7fc..0000000000 --- a/xen/include/asm-x86/psr.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * psr.h: Platform Shared Resource related service for guest. - * - * Copyright (c) 2014, Intel Corporation - * Author: Dongxiao Xu - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ -#ifndef __ASM_PSR_H__ -#define __ASM_PSR_H__ - -#include - -/* CAT cpuid level */ -#define PSR_CPUID_LEVEL_CAT 0x10 - -/* Resource Type Enumeration */ -#define PSR_RESOURCE_TYPE_L3 0x2 -#define PSR_RESOURCE_TYPE_L2 0x4 -#define PSR_RESOURCE_TYPE_MBA 0x8 - -/* L3 Monitoring Features */ -#define PSR_CMT_L3_OCCUPANCY 0x1 - -/* CDP Capability */ -#define PSR_CAT_CDP_CAPABILITY (1u << 2) - -/* L3 CDP Enable bit*/ -#define PSR_L3_QOS_CDP_ENABLE_BIT 0x0 - -/* Used by psr_get_info() */ -#define PSR_INFO_IDX_COS_MAX 0 -#define PSR_INFO_IDX_CAT_CBM_LEN 1 -#define PSR_INFO_IDX_CAT_FLAGS 2 -#define PSR_INFO_IDX_MBA_THRTL_MAX 1 -#define PSR_INFO_IDX_MBA_FLAGS 2 -#define PSR_INFO_ARRAY_SIZE 3 - -struct psr_cmt_l3 { - unsigned int features; - unsigned int upscaling_factor; - unsigned int rmid_max; -}; - -struct psr_cmt { - unsigned int rmid_max; - unsigned int features; - domid_t *rmid_to_dom; - struct psr_cmt_l3 l3; -}; - -enum psr_type { - PSR_TYPE_L3_CBM, - PSR_TYPE_L3_CODE, - PSR_TYPE_L3_DATA, - PSR_TYPE_L2_CBM, - PSR_TYPE_MBA_THRTL, - PSR_TYPE_UNKNOWN, -}; - -extern struct psr_cmt *psr_cmt; - -static inline bool_t psr_cmt_enabled(void) -{ - return !!psr_cmt; -} - -int psr_alloc_rmid(struct domain *d); -void psr_free_rmid(struct domain *d); -void psr_ctxt_switch_to(struct domain *d); - -int psr_get_info(unsigned int socket, enum psr_type type, - uint32_t data[], unsigned int array_len); -int psr_get_val(struct domain *d, unsigned int socket, - uint32_t *val, enum psr_type type); -int psr_set_val(struct domain *d, unsigned int socket, - uint64_t val, enum psr_type type); - -void psr_domain_init(struct domain *d); -void psr_domain_free(struct domain *d); - -#endif /* __ASM_PSR_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h deleted file mode 100644 index df9716ff26..0000000000 --- a/xen/include/asm-x86/pv/domain.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * pv/domain.h - * - * PV guest interface definitions - * - * Copyright (C) 2017 Wei Liu - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - */ - -#ifndef __X86_PV_DOMAIN_H__ -#define __X86_PV_DOMAIN_H__ - -#include - -#ifdef CONFIG_PV32 -extern int8_t opt_pv32; -#else -# define opt_pv32 false -#endif - -/* - * PCID values for the address spaces of 64-bit pv domains: - * - * We are using 4 PCID values for a 64 bit pv domain subject to XPTI: - * - hypervisor active and guest in kernel mode PCID 0 - * - hypervisor active and guest in user mode PCID 1 - * - guest active and in kernel mode PCID 2 - * - guest active and in user mode PCID 3 - * - * Without XPTI only 2 values are used: - * - guest in kernel mode PCID 0 - * - guest in user mode PCID 1 - */ - -#define PCID_PV_PRIV 0x0000 /* Used for other domains, too. */ -#define PCID_PV_USER 0x0001 -#define PCID_PV_XPTI 0x0002 /* To be ORed to above values. 
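
The four PCID values enumerated in the comment above are composed by simple ORing, exactly as get_pcid_bits() (just below) does:

    /* Illustrative only: the four PCIDs an XPTI-active 64-bit PV vCPU
     * cycles through, built from the defines above:
     *
     *   PCID_PV_PRIV                 = 0  Xen active, guest was in kernel mode
     *   PCID_PV_USER                 = 1  Xen active, guest was in user mode
     *   PCID_PV_XPTI | PCID_PV_PRIV  = 2  guest active, kernel mode
     *   PCID_PV_XPTI | PCID_PV_USER  = 3  guest active, user mode
     */
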
*/ - -/* - * Return additional PCID specific cr3 bits. - * - * Note that X86_CR3_NOFLUSH will not be readable in cr3. Anyone consuming - * v->arch.cr3 should mask away X86_CR3_NOFLUSH and X86_CR3_PCIDMASK in case - * the value is used to address the root page table. - */ -static inline unsigned long get_pcid_bits(const struct vcpu *v, bool is_xpti) -{ -#ifdef CONFIG_PV - return X86_CR3_NOFLUSH | (is_xpti ? PCID_PV_XPTI : 0) | - ((v->arch.flags & TF_kernel_mode) ? PCID_PV_PRIV : PCID_PV_USER); -#else - ASSERT_UNREACHABLE(); - return 0; -#endif -} - -#ifdef CONFIG_PV - -void pv_vcpu_destroy(struct vcpu *v); -int pv_vcpu_initialise(struct vcpu *v); -void pv_domain_destroy(struct domain *d); -int pv_domain_initialise(struct domain *d); - -/* - * Bits which a PV guest can toggle in its view of cr4. Some are loaded into - * hardware, while some are fully emulated. - */ -#define PV_CR4_GUEST_MASK \ - (X86_CR4_TSD | X86_CR4_DE | X86_CR4_FSGSBASE | X86_CR4_OSXSAVE) - -/* Bits which a PV guest may observe from the real hardware settings. */ -#define PV_CR4_GUEST_VISIBLE_MASK \ - (X86_CR4_PAE | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT) - -/* Given a new cr4 value, construct the resulting guest-visible cr4 value. */ -unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4); - -/* Create a cr4 value to load into hardware, based on vcpu settings. */ -unsigned long pv_make_cr4(const struct vcpu *v); - -bool xpti_pcid_enabled(void); - -#else /* !CONFIG_PV */ - -#include - -static inline void pv_vcpu_destroy(struct vcpu *v) {} -static inline int pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; } -static inline void pv_domain_destroy(struct domain *d) {} -static inline int pv_domain_initialise(struct domain *d) { return -EOPNOTSUPP; } - -static inline unsigned long pv_make_cr4(const struct vcpu *v) { return ~0ul; } - -#endif /* CONFIG_PV */ - -void paravirt_ctxt_switch_from(struct vcpu *v); -void paravirt_ctxt_switch_to(struct vcpu *v); - -#endif /* __X86_PV_DOMAIN_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/pv/grant_table.h b/xen/include/asm-x86/pv/grant_table.h deleted file mode 100644 index 85442b6074..0000000000 --- a/xen/include/asm-x86/pv/grant_table.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * asm-x86/pv/grant_table.h - * - * Grant table interfaces for PV guests - * - * Copyright (C) 2017 Wei Liu - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . 
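
The two masks above partition the guest's view of %cr4: PV_CR4_GUEST_MASK bits follow whatever the guest last wrote, while PV_CR4_GUEST_VISIBLE_MASK bits leak through from the real hardware setting. One plausible composition, as a sketch only and not the actual body of pv_fixup_guest_cr4():

    /* Illustrative only: a hypothetical composition of a guest-visible
     * %cr4 from the two masks above. */
    static unsigned long guest_view_of_cr4(unsigned long guest_cr4,
                                           unsigned long real_cr4)
    {
        return (guest_cr4 & PV_CR4_GUEST_MASK) |
               (real_cr4 & PV_CR4_GUEST_VISIBLE_MASK);
    }
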
- */ - -#ifndef __X86_PV_GRANT_TABLE_H__ -#define __X86_PV_GRANT_TABLE_H__ - -#ifdef CONFIG_PV - -int create_grant_pv_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, unsigned int cache_flags); -int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags); - -#else - -#include - -static inline int create_grant_pv_mapping(uint64_t addr, mfn_t frame, - unsigned int flags, - unsigned int cache_flags) -{ - return GNTST_general_error; -} - -static inline int replace_grant_pv_mapping(uint64_t addr, mfn_t frame, - uint64_t new_addr, unsigned int flags) -{ - return GNTST_general_error; -} - -#endif - -#endif /* __X86_PV_GRANT_TABLE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h deleted file mode 100644 index 9983f8257c..0000000000 --- a/xen/include/asm-x86/pv/mm.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * asm-x86/pv/mm.h - * - * Memory management interfaces for PV guests - * - * Copyright (C) 2017 Wei Liu - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - */ - -#ifndef __X86_PV_MM_H__ -#define __X86_PV_MM_H__ - -#ifdef CONFIG_PV - -int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs); - -int pv_set_gdt(struct vcpu *v, const unsigned long frames[], - unsigned int entries); -void pv_destroy_gdt(struct vcpu *v); - -bool pv_map_ldt_shadow_page(unsigned int off); -bool pv_destroy_ldt(struct vcpu *v); - -int validate_segdesc_page(struct page_info *page); - -#else - -#include -#include - -static inline int pv_ro_page_fault(unsigned long addr, - struct cpu_user_regs *regs) -{ - ASSERT_UNREACHABLE(); - return 0; -} - -static inline int pv_set_gdt(struct vcpu *v, const unsigned long frames[], - unsigned int entries) -{ ASSERT_UNREACHABLE(); return -EINVAL; } -static inline void pv_destroy_gdt(struct vcpu *v) { ASSERT_UNREACHABLE(); } - -static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; } -static inline bool pv_destroy_ldt(struct vcpu *v) -{ ASSERT_UNREACHABLE(); return false; } - -#endif - -#endif /* __X86_PV_MM_H__ */ diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h deleted file mode 100644 index 8a91f4f9df..0000000000 --- a/xen/include/asm-x86/pv/shim.h +++ /dev/null @@ -1,119 +0,0 @@ -/****************************************************************************** - * asm-x86/guest/shim.h - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - * - * Copyright (c) 2017 Citrix Systems Ltd. - */ - -#ifndef __X86_PV_SHIM_H__ -#define __X86_PV_SHIM_H__ - -#include - -#if defined(CONFIG_PV_SHIM_EXCLUSIVE) -# define pv_shim 1 -#elif defined(CONFIG_PV_SHIM) -extern bool pv_shim; -#else -# define pv_shim 0 -#endif /* CONFIG_PV_SHIM{,_EXCLUSIVE} */ - -#ifdef CONFIG_PV_SHIM - -void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, - unsigned long va_start, unsigned long store_va, - unsigned long console_va, unsigned long vphysmap, - start_info_t *si); -int pv_shim_shutdown(uint8_t reason); -void pv_shim_inject_evtchn(unsigned int port); -long pv_shim_cpu_up(void *data); -long pv_shim_cpu_down(void *data); -void pv_shim_online_memory(unsigned int nr, unsigned int order); -void pv_shim_offline_memory(unsigned int nr, unsigned int order); -domid_t get_initial_domain_id(void); -uint64_t pv_shim_mem(uint64_t avail); -void pv_shim_fixup_e820(struct e820map *e820); -const struct platform_bad_page *pv_shim_reserved_pages(unsigned int *size); - -#else - -static inline void pv_shim_setup_dom(struct domain *d, l4_pgentry_t *l4start, - unsigned long va_start, - unsigned long store_va, - unsigned long console_va, - unsigned long vphysmap, - start_info_t *si) -{ - ASSERT_UNREACHABLE(); -} -static inline int pv_shim_shutdown(uint8_t reason) -{ - ASSERT_UNREACHABLE(); - return 0; -} -static inline void pv_shim_inject_evtchn(unsigned int port) -{ - ASSERT_UNREACHABLE(); -} -static inline long pv_shim_cpu_up(void *data) -{ - ASSERT_UNREACHABLE(); - return 0; -} -static inline long pv_shim_cpu_down(void *data) -{ - ASSERT_UNREACHABLE(); - return 0; -} -static inline void pv_shim_online_memory(unsigned int nr, unsigned int order) -{ - ASSERT_UNREACHABLE(); -} -static inline void pv_shim_offline_memory(unsigned int nr, unsigned int order) -{ - ASSERT_UNREACHABLE(); -} -static inline domid_t get_initial_domain_id(void) -{ - return 0; -} -static inline uint64_t pv_shim_mem(uint64_t avail) -{ - ASSERT_UNREACHABLE(); - return 0; -} -static inline void pv_shim_fixup_e820(struct e820map *e820) -{ - ASSERT_UNREACHABLE(); -} -static inline const struct platform_bad_page * -pv_shim_reserved_pages(unsigned int *s) -{ - ASSERT_UNREACHABLE(); - return NULL; -} - -#endif - -#endif /* __X86_PV_SHIM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/pv/trace.h b/xen/include/asm-x86/pv/trace.h deleted file mode 100644 index c616206eeb..0000000000 --- a/xen/include/asm-x86/pv/trace.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef XEN_X86_PV_TRACE_H -#define XEN_X86_PV_TRACE_H - -#include - -#include - -void __trace_pv_trap(int trapnr, unsigned long eip, - int use_error_code, unsigned error_code); -static inline void trace_pv_trap(int trapnr, unsigned long eip, - int use_error_code, unsigned error_code) -{ - if ( unlikely(tb_init_done) ) - __trace_pv_trap(trapnr, eip, use_error_code, error_code); -} - -void __trace_pv_page_fault(unsigned long addr, unsigned error_code); -static inline void trace_pv_page_fault(unsigned long addr, - unsigned error_code) -{ - if ( unlikely(tb_init_done) ) - __trace_pv_page_fault(addr, error_code); -} - -void __trace_trap_one_addr(unsigned event, unsigned long va); -static inline void trace_trap_one_addr(unsigned event, unsigned long va) -{ - if ( unlikely(tb_init_done) ) - 
__trace_trap_one_addr(event, va); -} - -void __trace_trap_two_addr(unsigned event, unsigned long va1, - unsigned long va2); -static inline void trace_trap_two_addr(unsigned event, unsigned long va1, - unsigned long va2) -{ - if ( unlikely(tb_init_done) ) - __trace_trap_two_addr(event, va1, va2); -} - -void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte); -static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) -{ - if ( unlikely(tb_init_done) ) - __trace_ptwr_emulation(addr, npte); -} - -#endif /* XEN_X86_PV_TRACE_H */ diff --git a/xen/include/asm-x86/pv/traps.h b/xen/include/asm-x86/pv/traps.h deleted file mode 100644 index 855203c4e2..0000000000 --- a/xen/include/asm-x86/pv/traps.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * pv/traps.h - * - * PV guest traps interface definitions - * - * Copyright (C) 2017 Wei Liu - * - * This program is free software; you can redistribute it and/or - * modify it under the terms and conditions of the GNU General Public - * License, version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; If not, see . - */ - -#ifndef __X86_PV_TRAPS_H__ -#define __X86_PV_TRAPS_H__ - -#ifdef CONFIG_PV - -#include - -void pv_trap_init(void); - -int pv_raise_nmi(struct vcpu *v); - -int pv_emulate_privileged_op(struct cpu_user_regs *regs); -void pv_emulate_gate_op(struct cpu_user_regs *regs); -bool pv_emulate_invalid_op(struct cpu_user_regs *regs); - -static inline bool pv_trap_callback_registered(const struct vcpu *v, - uint8_t vector) -{ - return v->arch.pv.trap_ctxt[vector].address; -} - -#else /* !CONFIG_PV */ - -#include - -static inline void pv_trap_init(void) {} - -static inline int pv_raise_nmi(struct vcpu *v) { return -EOPNOTSUPP; } - -static inline int pv_emulate_privileged_op(struct cpu_user_regs *regs) { return 0; } -static inline void pv_emulate_gate_op(struct cpu_user_regs *regs) {} -static inline bool pv_emulate_invalid_op(struct cpu_user_regs *regs) { return true; } - -static inline bool pv_trap_callback_registered(const struct vcpu *v, - uint8_t vector) -{ - return false; -} -#endif /* CONFIG_PV */ - -#endif /* __X86_PV_TRAPS_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/random.h b/xen/include/asm-x86/random.h deleted file mode 100644 index 9e1fe0bc1d..0000000000 --- a/xen/include/asm-x86/random.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_RANDOM_H__ -#define __ASM_RANDOM_H__ - -#include - -static inline unsigned int arch_get_random(void) -{ - unsigned int val = 0; - - if ( cpu_has(¤t_cpu_data, X86_FEATURE_RDRAND) ) - asm volatile ( ".byte 0x0f,0xc7,0xf0" : "+a" (val) ); - - return val; -} - -#endif /* __ASM_RANDOM_H__ */ diff --git a/xen/include/asm-x86/regs.h b/xen/include/asm-x86/regs.h deleted file mode 100644 index 3fb94deedc..0000000000 --- a/xen/include/asm-x86/regs.h +++ /dev/null @@ -1,33 +0,0 @@ - -#ifndef __X86_REGS_H__ -#define __X86_REGS_H__ - -#include - -#define guest_mode(r) \ -({ \ - unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); \ - /* Frame pointer must point into current CPU stack. 
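
The pv/trace.h wrappers above all follow one pattern: a cheap inline guard tests tb_init_done under unlikely(), and only when tracing is active does control reach the out-of-line __trace_* body. The general shape, with hypothetical names and unlikely() restated for self-containment:

    /* Illustrative only: the inline-guard-around-cold-body pattern used
     * by the trace wrappers above.  Names are hypothetical. */
    #include <stdbool.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    extern bool tracing_enabled;               /* stands in for tb_init_done */

    void __trace_my_event(unsigned long arg);  /* out of line, cold */

    static inline void trace_my_event(unsigned long arg)
    {
        if ( unlikely(tracing_enabled) )       /* one predicted-untaken test */
            __trace_my_event(arg);
    }
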
*/ \ - ASSERT(diff < STACK_SIZE); \ - /* If not a guest frame, it must be a hypervisor frame. */ \ - if ( diff < PRIMARY_STACK_SIZE ) \ - ASSERT(!diff || ((r)->cs == __HYPERVISOR_CS)); \ - /* Return TRUE if it's a guest frame. */ \ - !diff || ((r)->cs != __HYPERVISOR_CS); \ -}) - -#define read_sreg(name) ({ \ - unsigned int __sel; \ - asm ( "mov %%" STR(name) ",%0" : "=r" (__sel) ); \ - __sel; \ -}) - -static inline void read_sregs(struct cpu_user_regs *regs) -{ - asm ( "mov %%ds, %0" : "=m" (regs->ds) ); - asm ( "mov %%es, %0" : "=m" (regs->es) ); - asm ( "mov %%fs, %0" : "=m" (regs->fs) ); - asm ( "mov %%gs, %0" : "=m" (regs->gs) ); -} - -#endif /* __X86_REGS_H__ */ diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h deleted file mode 100644 index 7dc03b6b8d..0000000000 --- a/xen/include/asm-x86/setup.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef __X86_SETUP_H_ -#define __X86_SETUP_H_ - -#include -#include - -extern const char __2M_text_start[], __2M_text_end[]; -extern const char __ro_after_init_start[], __ro_after_init_end[]; -extern const char __2M_rodata_start[], __2M_rodata_end[]; -extern char __2M_init_start[], __2M_init_end[]; -extern char __2M_rwdata_start[], __2M_rwdata_end[]; - -extern unsigned long xenheap_initial_phys_start; -extern uint64_t boot_tsc_stamp; - -extern void *stack_start; - -void early_cpu_init(void); -void early_time_init(void); - -void set_nr_cpu_ids(unsigned int max_cpus); - -void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); -void arch_init_memory(void); -void subarch_init_memory(void); - -void init_IRQ(void); - -#ifdef CONFIG_VIDEO -void vesa_init(void); -void vesa_mtrr_init(void); -#else -static inline void vesa_init(void) {}; -static inline void vesa_mtrr_init(void) {}; -#endif - -int construct_dom0( - struct domain *d, - const module_t *kernel, unsigned long kernel_headroom, - module_t *initrd, - char *cmdline); -void setup_io_bitmap(struct domain *d); - -unsigned long initial_images_nrpages(nodeid_t node); -void discard_initial_images(void); -void *bootstrap_map(const module_t *mod); - -int xen_in_range(unsigned long mfn); - -void microcode_grab_module( - unsigned long *, const multiboot_info_t *); - -extern uint8_t kbd_shift_flags; - -#ifdef NDEBUG -# define highmem_start 0 -#else -extern unsigned long highmem_start; -#endif - -extern int8_t opt_smt; - -#ifdef CONFIG_SHADOW_PAGING -extern bool opt_dom0_shadow; -#else -#define opt_dom0_shadow false -#endif -extern bool opt_dom0_pvh; -extern bool opt_dom0_verbose; -extern bool opt_dom0_cpuid_faulting; -extern bool opt_dom0_msr_relaxed; - -#define max_init_domid (0) - -#endif diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h deleted file mode 100644 index e25f9604d8..0000000000 --- a/xen/include/asm-x86/shadow.h +++ /dev/null @@ -1,273 +0,0 @@ -/****************************************************************************** - * include/asm-x86/shadow.h - * - * Parts of this code are Copyright (c) 2006 by XenSource Inc. - * Parts of this code are Copyright (c) 2006 by Michael A Fetterman - * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - */ - -#ifndef _XEN_SHADOW_H -#define _XEN_SHADOW_H - -#include -#include -#include -#include -#include -#include -#include - -#include - -/***************************************************************************** - * Macros to tell which shadow paging mode a domain is in*/ - -#define shadow_mode_enabled(_d) paging_mode_shadow(_d) -#define shadow_mode_refcounts(_d) (paging_mode_shadow(_d) && \ - paging_mode_refcounts(_d)) -#define shadow_mode_log_dirty(_d) (paging_mode_shadow(_d) && \ - paging_mode_log_dirty(_d)) -#define shadow_mode_translate(_d) (paging_mode_shadow(_d) && \ - paging_mode_translate(_d)) -#define shadow_mode_external(_d) (paging_mode_shadow(_d) && \ - paging_mode_external(_d)) - -/***************************************************************************** - * Entry points into the shadow code */ - -/* Set up the shadow-specific parts of a domain struct at start of day. - * Called from paging_domain_init(). */ -int shadow_domain_init(struct domain *d); - -/* Setup the shadow-specific parts of a vcpu struct. It is called by - * paging_vcpu_init() in paging.c */ -void shadow_vcpu_init(struct vcpu *v); - -#ifdef CONFIG_SHADOW_PAGING - -/* Enable an arbitrary shadow mode. Call once at domain creation. */ -int shadow_enable(struct domain *d, u32 mode); - -/* Enable VRAM dirty bit tracking. */ -int shadow_track_dirty_vram(struct domain *d, - unsigned long first_pfn, - unsigned int nr_frames, - XEN_GUEST_HANDLE(void) dirty_bitmap); - -/* Handler for shadow control ops: operations from user-space to enable - * and disable ephemeral shadow modes (test mode and log-dirty mode) and - * manipulate the log-dirty bitmap. */ -int shadow_domctl(struct domain *d, - struct xen_domctl_shadow_op *sc, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); - -/* Call when destroying a vcpu/domain */ -void shadow_vcpu_teardown(struct vcpu *v); -void shadow_teardown(struct domain *d, bool *preempted); - -/* Call once all of the references to the domain have gone away */ -void shadow_final_teardown(struct domain *d); - -void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all); - -/* Adjust shadows ready for a guest page to change its type. */ -void shadow_prepare_page_type_change(struct domain *d, struct page_info *page, - unsigned long new_type); - -/* Discard _all_ mappings from the domain's shadows. */ -void shadow_blow_tables_per_domain(struct domain *d); - -/* Set the pool of shadow pages to the required number of pages. - * Input will be rounded up to at least shadow_min_acceptable_pages(), - * plus space for the p2m table. - * Returns 0 for success, non-zero for failure. 
*/ -int shadow_set_allocation(struct domain *d, unsigned int pages, - bool *preempted); - -#else /* !CONFIG_SHADOW_PAGING */ - -#define shadow_vcpu_teardown(v) ASSERT(is_pv_vcpu(v)) -#define shadow_teardown(d, p) ASSERT(is_pv_domain(d)) -#define shadow_final_teardown(d) ASSERT(is_pv_domain(d)) -#define shadow_enable(d, mode) \ - ({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; }) -#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \ - ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; }) -#define shadow_set_allocation(d, pages, preempted) \ - ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; }) - -static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn, - int fast, int all) {} - -static inline void shadow_prepare_page_type_change(struct domain *d, - struct page_info *page, - unsigned long new_type) {} - -static inline void shadow_blow_tables_per_domain(struct domain *d) {} - -static inline int shadow_domctl(struct domain *d, - struct xen_domctl_shadow_op *sc, - XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) -{ - return -EINVAL; -} - -#endif /* CONFIG_SHADOW_PAGING */ - -/* - * Mitigations for L1TF / CVE-2018-3620 for PV guests. - * - * We cannot alter an architecturally-legitimate PTE which a PV guest has - * chosen to write, as traditional paged-out metadata is L1TF-vulnerable. - * What we can do is force a PV guest which writes a vulnerable PTE into - * shadow mode, so Xen controls the pagetables which are reachable by the CPU - * pagewalk. - * - * The core of the L1TF vulnerability is that the address bits of the PTE - * (accounting for PSE and factoring in the level-relevant part of the linear - * access) are sent for an L1D lookup (to retrieve the next-level PTE, or - * eventual memory address) before the Present or reserved bits (which would - * cause a terminal fault) are accounted for. If an L1D hit occurs, the - * resulting data is available for potentially dependent instructions. - * - * For Present PTEs, the PV type-count safety logic ensures that the address - * bits always point at a guest-accessible frame, which is safe WRT L1TF from - * Xen's point of view. In practice, a PV guest should be unable to set any - * reserved bits, so should be unable to create any present L1TF-vulnerable - * PTEs at all. - * - * Therefore, these safety checks apply to Not-Present PTEs only, where - * traditionally, Xen would have let the guest write any value it chose. - * - * The all-zero PTE potentially leaks mfn 0. All software on the system is - * expected to cooperate and not put any secrets there. In a Xen system, - * neither Xen nor dom0 are expected to touch mfn 0, as it typically contains - * the real mode IVT and Bios Data Area. Therefore, mfn 0 is considered safe. - * - * Any PTE whose address is higher than the maximum cacheable address is safe, - * as it won't get an L1D hit. - * - * Speculative superpages also need accounting for, as PSE is considered - * irrespective of Present. We disallow PSE being set, as it allows an - * attacker to leak 2M or 1G of data starting from mfn 0. Also, because of - * recursive/linear pagetables, we must consider PSE even at L4, as hardware - * will interpret an L4e as an L3e during a recursive walk. 
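
The L1TF rules laid out in the comment above reduce to a short predicate: a not-present PTE is safe if it has no PSE bit at levels above L1 and its address field is either zero (mfn 0, treated as secret-free) or beyond the highest cacheable address. A standalone restatement, with stand-in parameters for l1tf_addr_mask/l1tf_safe_maddr:

    /* Illustrative only: the L1TF safety rules from the comment above as
     * one predicate.  'addr_mask'/'safe_maddr' stand in for the globals. */
    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_PSE (1ULL << 7)

    static bool l1tf_safe_pte(uint64_t pte, unsigned int level,
                              uint64_t addr_mask, uint64_t safe_maddr)
    {
        uint64_t maddr = pte & addr_mask;

        if ( level > 1 && (pte & _PAGE_PSE) )  /* superpage leaks from mfn 0 */
            return false;

        return maddr == 0 || maddr >= safe_maddr;
    }
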
- */ - -static inline bool is_l1tf_safe_maddr(intpte_t pte) -{ - paddr_t maddr = pte & l1tf_addr_mask; - - return maddr == 0 || maddr >= l1tf_safe_maddr; -} - -#ifdef CONFIG_PV - -static inline bool pv_l1tf_check_pte(struct domain *d, unsigned int level, - intpte_t pte) -{ - ASSERT(is_pv_domain(d)); - ASSERT(!(pte & _PAGE_PRESENT)); - - if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) && - (((level > 1) && (pte & _PAGE_PSE)) || !is_l1tf_safe_maddr(pte)) ) - { -#ifdef CONFIG_SHADOW_PAGING - struct tasklet *t = &d->arch.paging.shadow.pv_l1tf_tasklet; - - printk(XENLOG_G_WARNING - "d%d L1TF-vulnerable L%ue %016"PRIx64" - Shadowing\n", - d->domain_id, level, pte); - /* - * Safety consideration for accessing tasklet.scheduled_on without the - * tasklet lock. This is a singleshot tasklet with the side effect of - * setting PG_SH_forced (checked just above). Multiple vcpus can race - * to schedule the tasklet, but if we observe it scheduled anywhere, - * that is good enough. - */ - smp_rmb(); - if ( !tasklet_is_scheduled(t) ) - tasklet_schedule(t); -#else - printk(XENLOG_G_ERR - "d%d L1TF-vulnerable L%ue %016"PRIx64" - Crashing\n", - d->domain_id, level, pte); - domain_crash(d); -#endif - return true; - } - - return false; -} - -static inline bool pv_l1tf_check_l1e(struct domain *d, l1_pgentry_t l1e) -{ - return pv_l1tf_check_pte(d, 1, l1e.l1); -} - -static inline bool pv_l1tf_check_l2e(struct domain *d, l2_pgentry_t l2e) -{ - return pv_l1tf_check_pte(d, 2, l2e.l2); -} - -static inline bool pv_l1tf_check_l3e(struct domain *d, l3_pgentry_t l3e) -{ - return pv_l1tf_check_pte(d, 3, l3e.l3); -} - -static inline bool pv_l1tf_check_l4e(struct domain *d, l4_pgentry_t l4e) -{ - return pv_l1tf_check_pte(d, 4, l4e.l4); -} - -void pv_l1tf_tasklet(void *data); - -static inline void pv_l1tf_domain_init(struct domain *d) -{ - d->arch.pv.check_l1tf = is_hardware_domain(d) ? opt_pv_l1tf_hwdom - : opt_pv_l1tf_domu; - -#ifdef CONFIG_SHADOW_PAGING - tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet, pv_l1tf_tasklet, d); -#endif -} - -static inline void pv_l1tf_domain_destroy(struct domain *d) -{ -#ifdef CONFIG_SHADOW_PAGING - tasklet_kill(&d->arch.paging.shadow.pv_l1tf_tasklet); -#endif -} - -#endif /* CONFIG_PV */ - -/* Remove all shadows of the guest mfn. */ -static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn) -{ - /* See the comment about locking in sh_remove_shadows */ - sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */); -} - -#endif /* _XEN_SHADOW_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/shared.h b/xen/include/asm-x86/shared.h deleted file mode 100644 index dd3ae8c263..0000000000 --- a/xen/include/asm-x86/shared.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef __XEN_X86_SHARED_H__ -#define __XEN_X86_SHARED_H__ - -#ifdef CONFIG_COMPAT - -#define nmi_reason(d) (!has_32bit_shinfo(d) ? \ - (u32 *)&(d)->shared_info->native.arch.nmi_reason : \ - (u32 *)&(d)->shared_info->compat.arch.nmi_reason) - -#define GET_SET_SHARED(type, field) \ -static inline type arch_get_##field(const struct domain *d) \ -{ \ - return !has_32bit_shinfo(d) ? 
\ - d->shared_info->native.arch.field : \ - d->shared_info->compat.arch.field; \ -} \ -static inline void arch_set_##field(struct domain *d, \ - type val) \ -{ \ - if ( !has_32bit_shinfo(d) ) \ - d->shared_info->native.arch.field = val; \ - else \ - d->shared_info->compat.arch.field = val; \ -} - -#define GET_SET_VCPU(type, field) \ -static inline type arch_get_##field(const struct vcpu *v) \ -{ \ - return !has_32bit_shinfo(v->domain) ? \ - v->vcpu_info->native.arch.field : \ - v->vcpu_info->compat.arch.field; \ -} \ -static inline void arch_set_##field(struct vcpu *v, \ - type val) \ -{ \ - if ( !has_32bit_shinfo(v->domain) ) \ - v->vcpu_info->native.arch.field = val; \ - else \ - v->vcpu_info->compat.arch.field = val; \ -} - -#else - -#define nmi_reason(d) (&(d)->shared_info->arch.nmi_reason) - -#define GET_SET_SHARED(type, field) \ -static inline type arch_get_##field(const struct domain *d) \ -{ \ - return d->shared_info->arch.field; \ -} \ -static inline void arch_set_##field(struct domain *d, \ - type val) \ -{ \ - d->shared_info->arch.field = val; \ -} - -#define GET_SET_VCPU(type, field) \ -static inline type arch_get_##field(const struct vcpu *v) \ -{ \ - return v->vcpu_info->arch.field; \ -} \ -static inline void arch_set_##field(struct vcpu *v, \ - type val) \ -{ \ - v->vcpu_info->arch.field = val; \ -} - -#endif - -GET_SET_SHARED(unsigned long, max_pfn) -GET_SET_SHARED(xen_pfn_t, pfn_to_mfn_frame_list_list) -GET_SET_SHARED(unsigned long, nmi_reason) - -GET_SET_VCPU(unsigned long, cr2) - -#undef GET_SET_VCPU -#undef GET_SET_SHARED - -#endif /* __XEN_X86_SHARED_H__ */ diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h deleted file mode 100644 index f7485f602e..0000000000 --- a/xen/include/asm-x86/smp.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifndef __ASSEMBLY__ -#include -#include -#include -#include -#include -#endif - -#define BAD_APICID (-1U) -#define INVALID_CUID (~0U) /* AMD Compute Unit ID */ -#ifndef __ASSEMBLY__ - -/* - * Private routines/data - */ -DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask); -DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask); -DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask); -DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask); - -/* - * Do we, for platform reasons, need to actually keep CPUs online when we - * would otherwise prefer them to be off? - */ -extern bool park_offline_cpus; - -void smp_send_nmi_allbutself(void); - -void send_IPI_mask(const cpumask_t *, int vector); -void send_IPI_self(int vector); - -extern void (*mtrr_hook) (void); - -extern void zap_low_mappings(void); - -extern u32 x86_cpu_to_apicid[]; - -#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] - -#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -extern void cpu_exit_clear(unsigned int cpu); -extern void cpu_uninit(unsigned int cpu); -int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm); - -/* - * This function is needed by all SMP systems. It must _always_ be valid - * from the initial startup. We map APIC_BASE very early in page_setup(), - * so this is correct in the x86 case. 
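
To make the GET_SET_SHARED machinery above concrete, this is what the !CONFIG_COMPAT variant of GET_SET_SHARED(unsigned long, max_pfn) expands to:

    /* Illustrative only: macro expansion of
     * GET_SET_SHARED(unsigned long, max_pfn) in the !CONFIG_COMPAT case. */
    static inline unsigned long arch_get_max_pfn(const struct domain *d)
    {
        return d->shared_info->arch.max_pfn;
    }
    static inline void arch_set_max_pfn(struct domain *d, unsigned long val)
    {
        d->shared_info->arch.max_pfn = val;
    }

The CONFIG_COMPAT variant generates the same pair of accessors, but each dispatches on has_32bit_shinfo() between the native and compat shared-info layouts.
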
- */ -#define smp_processor_id() get_processor_id() - -void __stop_this_cpu(void); - -long cpu_up_helper(void *data); -long cpu_down_helper(void *data); - -long core_parking_helper(void *data); -bool core_parking_remove(unsigned int cpu); -uint32_t get_cur_idle_nums(void); - -/* - * The value may be greater than the actual socket number in the system and - * is required not to change from the initial startup. - */ -extern unsigned int nr_sockets; - -void set_nr_sockets(void); - -/* Representing HT and core siblings in each socket. */ -extern cpumask_t **socket_cpumask; - -/* - * To be used only while no context switch can occur on the cpu, i.e. - * by certain scheduling code only. - */ -#define get_cpu_current(cpu) \ - (get_cpu_info_from_stack((unsigned long)stack_base[cpu])->current_vcpu) - -extern unsigned int disabled_cpus; -extern bool unaccounted_cpus; - -#endif /* !__ASSEMBLY__ */ - -#endif diff --git a/xen/include/asm-x86/softirq.h b/xen/include/asm-x86/softirq.h deleted file mode 100644 index 415ee866c7..0000000000 --- a/xen/include/asm-x86/softirq.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_SOFTIRQ_H__ -#define __ASM_SOFTIRQ_H__ - -#define NMI_SOFTIRQ (NR_COMMON_SOFTIRQS + 0) -#define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1) -#define VCPU_KICK_SOFTIRQ (NR_COMMON_SOFTIRQS + 2) - -#define MACHINE_CHECK_SOFTIRQ (NR_COMMON_SOFTIRQS + 3) -#define HVM_DPCI_SOFTIRQ (NR_COMMON_SOFTIRQS + 4) -#define NR_ARCH_SOFTIRQS 5 - -bool arch_skip_send_event_check(unsigned int cpu); - -#endif /* __ASM_SOFTIRQ_H__ */ diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h deleted file mode 100644 index a803d16f90..0000000000 --- a/xen/include/asm-x86/spec_ctrl.h +++ /dev/null @@ -1,151 +0,0 @@ -/****************************************************************************** - * include/asm-x86/spec_ctrl.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - * - * Copyright (c) 2017-2018 Citrix Systems Ltd. - */ - -#ifndef __X86_SPEC_CTRL_H__ -#define __X86_SPEC_CTRL_H__ - -/* Encoding of cpuinfo.spec_ctrl_flags */ -#define SCF_use_shadow (1 << 0) -#define SCF_ist_wrmsr (1 << 1) -#define SCF_ist_rsb (1 << 2) - -#ifndef __ASSEMBLY__ - -#include -#include -#include - -void init_speculation_mitigations(void); - -extern bool opt_ibpb; -extern bool opt_ssbd; -extern int8_t opt_eager_fpu; -extern int8_t opt_l1d_flush; - -extern bool bsp_delay_spec_ctrl; -extern uint8_t default_xen_spec_ctrl; -extern uint8_t default_spec_ctrl_flags; - -extern int8_t opt_xpti_hwdom, opt_xpti_domu; - -extern bool cpu_has_bug_l1tf; -extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu; - -/* - * The L1D address mask, which might be wider than reported in CPUID, and the - * system physical address above which there are believed to be no cacheable - * memory regions, thus unable to leak data via the L1TF vulnerability. 
- */ -extern paddr_t l1tf_addr_mask, l1tf_safe_maddr; - -extern uint64_t default_xen_mcu_opt_ctrl; - -static inline void init_shadow_spec_ctrl_state(void) -{ - struct cpu_info *info = get_cpu_info(); - - info->shadow_spec_ctrl = 0; - info->xen_spec_ctrl = default_xen_spec_ctrl; - info->spec_ctrl_flags = default_spec_ctrl_flags; - - /* - * For least latency, the VERW selector should be a writeable data - * descriptor resident in the cache. __HYPERVISOR_DS32 shares a cache - * line with __HYPERVISOR_CS, so is expected to be very cache-hot. - */ - info->verw_sel = __HYPERVISOR_DS32; -} - -/* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */ -static always_inline void spec_ctrl_enter_idle(struct cpu_info *info) -{ - uint32_t val = 0; - - /* - * Branch Target Injection: - * - * Latch the new shadow value, then enable shadowing, then update the MSR. - * There are no SMP issues here; only local processor ordering concerns. - */ - info->shadow_spec_ctrl = val; - barrier(); - info->spec_ctrl_flags |= SCF_use_shadow; - barrier(); - alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE, - "a" (val), "c" (MSR_SPEC_CTRL), "d" (0)); - barrier(); - - /* - * Microarchitectural Store Buffer Data Sampling: - * - * On vulnerable systems, store buffer entries are statically partitioned - * between active threads. When entering idle, our store buffer entries - * are re-partitioned to allow the other threads to use them. - * - * Flush the buffers to ensure that no sensitive data of ours can be - * leaked by a sibling after it gets our store buffer entries. - * - * Note: VERW must be encoded with a memory operand, as it is only that - * form which causes a flush. - */ - alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE, - [sel] "m" (info->verw_sel)); -} - -/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */ -static always_inline void spec_ctrl_exit_idle(struct cpu_info *info) -{ - uint32_t val = info->xen_spec_ctrl; - - /* - * Branch Target Injection: - * - * Disable shadowing before updating the MSR. There are no SMP issues - * here; only local processor ordering concerns. - */ - info->spec_ctrl_flags &= ~SCF_use_shadow; - barrier(); - alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE, - "a" (val), "c" (MSR_SPEC_CTRL), "d" (0)); - barrier(); - - /* - * Microarchitectural Store Buffer Data Sampling: - * - * On vulnerable systems, store buffer entries are statically partitioned - * between active threads. When exiting idle, the other threads store - * buffer entries are re-partitioned to give us some. - * - * We now have store buffer entries with stale data from sibling threads. - * A flush if necessary will be performed on the return to guest path. - */ -} - -#endif /* __ASSEMBLY__ */ -#endif /* !__X86_SPEC_CTRL_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h deleted file mode 100644 index cb34299a86..0000000000 --- a/xen/include/asm-x86/spec_ctrl_asm.h +++ /dev/null @@ -1,342 +0,0 @@ -/****************************************************************************** - * include/asm-x86/spec_ctrl.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
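The two WARNING comments on spec_ctrl_enter_idle()/spec_ctrl_exit_idle() above pair up around an idle period; a hedged sketch of the intended call pattern (the real idle loops vary by idle method):

    struct cpu_info *info = get_cpu_info();

    spec_ctrl_enter_idle(info); /* no `ret`/`call *`/`jmp *` from here on */
    safe_halt();                /* e.g. sti;hlt until the next event */
    spec_ctrl_exit_idle(info);  /* indirect control flow is safe again */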
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . - * - * Copyright (c) 2017-2018 Citrix Systems Ltd. - */ - -#ifndef __X86_SPEC_CTRL_ASM_H__ -#define __X86_SPEC_CTRL_ASM_H__ - -#ifdef __ASSEMBLY__ -#include -#include - -/* - * Saving and restoring MSR_SPEC_CTRL state is a little tricky. - * - * We want the guests choice of SPEC_CTRL while in guest context, and Xen's - * choice (set or clear, depending on the hardware) while running in Xen - * context. Therefore, a simplistic algorithm is: - * - * - Set/clear IBRS on entry to Xen - * - Set the guests' choice on exit to guest - * - Leave SPEC_CTRL unchanged on exit to xen - * - * There are two complicating factors: - * 1) HVM guests can have direct access to the MSR, so it can change - * behind Xen's back. - * 2) An NMI or MCE can interrupt at any point, including early in the entry - * path, or late in the exit path after restoring the guest value. This - * will corrupt the guest value. - * - * Factor 1 is dealt with by relying on NMIs/MCEs being blocked immediately - * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates - * current before loading Xen's MSR_SPEC_CTRL setting. - * - * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow - * boolean in the per cpu spec_ctrl_flags. The synchronous use is: - * - * 1) Store guest value in shadow_spec_ctrl - * 2) Set the use_shadow boolean - * 3) Load guest value into MSR_SPEC_CTRL - * 4) Exit to guest - * 5) Entry from guest - * 6) Clear the use_shadow boolean - * 7) Load Xen's value into MSR_SPEC_CTRL - * - * The asynchronous use for interrupts/exceptions is: - * - Set/clear IBRS on entry to Xen - * - On exit to Xen, check use_shadow - * - If set, load shadow_spec_ctrl - * - * Therefore, an interrupt/exception which hits the synchronous path between - * steps 2 and 6 will restore the shadow value rather than leaving Xen's value - * loaded and corrupting the value used in guest context. - * - * The following ASM fragments implement this algorithm. See their local - * comments for further details. - * - SPEC_CTRL_ENTRY_FROM_HVM - * - SPEC_CTRL_ENTRY_FROM_PV - * - SPEC_CTRL_ENTRY_FROM_INTR - * - SPEC_CTRL_ENTRY_FROM_INTR_IST - * - SPEC_CTRL_EXIT_TO_XEN_IST - * - SPEC_CTRL_EXIT_TO_XEN - * - SPEC_CTRL_EXIT_TO_PV - * - SPEC_CTRL_EXIT_TO_HVM - */ - -.macro DO_OVERWRITE_RSB tmp=rax -/* - * Requires nothing - * Clobbers \tmp (%rax by default), %rcx - * - * Requires 256 bytes of {,shadow}stack space, but %rsp/SSP has no net - * change. Based on Google's performance numbers, the loop is unrolled to 16 - * iterations and two calls per iteration. - * - * The call filling the RSB needs a nonzero displacement. A nop would do, but - * we use "1: pause; lfence; jmp 1b" to safely contains any ret-based - * speculation, even if the loop is speculatively executed prematurely. - * - * %rsp is preserved by using an extra GPR because a) we've got plenty spare, - * b) the two movs are shorter to encode than `add $32*8, %rsp`, and c) can be - * optimised with mov-elimination in modern cores. - */ - mov $16, %ecx /* 16 iterations, two calls per loop */ - mov %rsp, %\tmp /* Store the current %rsp */ - -.L\@_fill_rsb_loop: - - .irp n, 1, 2 /* Unrolled twice. 
*/ - call .L\@_insert_rsb_entry_\n /* Create an RSB entry. */ - -.L\@_capture_speculation_\n: - pause - lfence - jmp .L\@_capture_speculation_\n /* Capture rogue speculation. */ - -.L\@_insert_rsb_entry_\n: - .endr - - sub $1, %ecx - jnz .L\@_fill_rsb_loop - mov %\tmp, %rsp /* Restore old %rsp */ - -#ifdef CONFIG_XEN_SHSTK - mov $1, %ecx - rdsspd %ecx - cmp $1, %ecx - je .L\@_shstk_done - mov $64, %ecx /* 64 * 4 bytes, given incsspd */ - incsspd %ecx /* Restore old SSP */ -.L\@_shstk_done: -#endif -.endm - -.macro DO_SPEC_CTRL_ENTRY_FROM_HVM -/* - * Requires %rbx=current, %rsp=regs/cpuinfo - * Clobbers %rax, %rcx, %rdx - * - * The common case is that a guest has direct access to MSR_SPEC_CTRL, at - * which point we need to save the guest value before setting IBRS for Xen. - * Unilaterally saving the guest value is shorter and faster than checking. - */ - mov $MSR_SPEC_CTRL, %ecx - rdmsr - - /* Stash the value from hardware. */ - mov VCPU_arch_msrs(%rbx), %rdx - mov %eax, VCPUMSR_spec_ctrl_raw(%rdx) - xor %edx, %edx - - /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */ - andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) - - /* Load Xen's intended value. */ - movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax - wrmsr -.endm - -.macro DO_SPEC_CTRL_ENTRY maybexen:req -/* - * Requires %rsp=regs (also cpuinfo if !maybexen) - * Requires %r14=stack_end (if maybexen) - * Clobbers %rax, %rcx, %rdx - * - * PV guests can't update MSR_SPEC_CTRL behind Xen's back, so no need to read - * it back. Entries from guest context need to clear SPEC_CTRL shadowing, - * while entries from Xen must leave shadowing in its current state. - */ - mov $MSR_SPEC_CTRL, %ecx - xor %edx, %edx - - /* - * Clear SPEC_CTRL shadowing *before* loading Xen's value. If entering - * from a possibly-xen context, %rsp doesn't necessarily alias the cpuinfo - * block so calculate the position directly. - */ - .if \maybexen - xor %eax, %eax - /* Branchless `if ( !xen ) clear_shadowing` */ - testb $3, UREGS_cs(%rsp) - setnz %al - not %eax - and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) - movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax - .else - andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) - movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax - .endif - - wrmsr -.endm - -.macro DO_SPEC_CTRL_EXIT_TO_XEN -/* - * Requires %rbx=stack_end - * Clobbers %rax, %rcx, %rdx - * - * When returning to Xen context, look to see whether SPEC_CTRL shadowing is - * in effect, and reload the shadow value. This covers race conditions which - * exist with an NMI/MCE/etc hitting late in the return-to-guest path. - */ - xor %edx, %edx - - testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) - jz .L\@_skip - - mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax - mov $MSR_SPEC_CTRL, %ecx - wrmsr - -.L\@_skip: -.endm - -.macro DO_SPEC_CTRL_EXIT_TO_GUEST -/* - * Requires %eax=spec_ctrl, %rsp=regs/cpuinfo - * Clobbers %rcx, %rdx - * - * When returning to guest context, set up SPEC_CTRL shadowing and load the - * guest value. - */ - /* Set up shadow value *before* enabling shadowing. */ - mov %eax, CPUINFO_shadow_spec_ctrl(%rsp) - - /* Set SPEC_CTRL shadowing *before* loading the guest value. */ - orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) - - mov $MSR_SPEC_CTRL, %ecx - xor %edx, %edx - wrmsr -.endm - -/* Use after a VMEXIT from an HVM guest. 
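The "Branchless `if ( !xen ) clear_shadowing`" sequence in DO_SPEC_CTRL_ENTRY above is compact enough to deserve a C model (illustrative only — the asm is authoritative):

    /*
     * setnz %al yields 1 when entered from guest context (CS RPL != 0);
     * after the NOT the mask is 0xfe, so the AND clears SCF_use_shadow
     * (bit 0).  For entries from Xen the mask is 0xff and the current
     * shadowing state is left alone.
     */
    uint8_t mask = ~(uint8_t)((regs->cs & 3) != 0);

    info->spec_ctrl_flags &= mask;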
*/ -#define SPEC_CTRL_ENTRY_FROM_HVM \ - ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \ - ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_HVM, \ - X86_FEATURE_SC_MSR_HVM - -/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */ -#define SPEC_CTRL_ENTRY_FROM_PV \ - ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \ - ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), \ - X86_FEATURE_SC_MSR_PV - -/* Use in interrupt/exception context. May interrupt Xen or PV context. */ -#define SPEC_CTRL_ENTRY_FROM_INTR \ - ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \ - ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), \ - X86_FEATURE_SC_MSR_PV - -/* Use when exiting to Xen context. */ -#define SPEC_CTRL_EXIT_TO_XEN \ - ALTERNATIVE "", \ - DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR_PV - -/* Use when exiting to PV guest context. */ -#define SPEC_CTRL_EXIT_TO_PV \ - ALTERNATIVE "", \ - DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV; \ - ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \ - X86_FEATURE_SC_VERW_PV - -/* Use when exiting to HVM guest context. */ -#define SPEC_CTRL_EXIT_TO_HVM \ - ALTERNATIVE "", \ - DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM; \ - ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), \ - X86_FEATURE_SC_VERW_HVM - -/* - * Use in IST interrupt/exception context. May interrupt Xen or PV context. - * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume - * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has - * been reloaded. - */ -.macro SPEC_CTRL_ENTRY_FROM_INTR_IST -/* - * Requires %rsp=regs, %r14=stack_end - * Clobbers %rax, %rcx, %rdx - * - * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY - * maybexen=1, but with conditionals rather than alternatives. - */ - movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %eax - - test $SCF_ist_rsb, %al - jz .L\@_skip_rsb - - DO_OVERWRITE_RSB tmp=rdx /* Clobbers %rcx/%rdx */ - -.L\@_skip_rsb: - - test $SCF_ist_wrmsr, %al - jz .L\@_skip_wrmsr - - xor %edx, %edx - testb $3, UREGS_cs(%rsp) - setnz %dl - not %edx - and %dl, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) - - /* Load Xen's intended value. */ - mov $MSR_SPEC_CTRL, %ecx - movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax - xor %edx, %edx - wrmsr - - /* Opencoded UNLIKELY_START() with no condition. */ -UNLIKELY_DISPATCH_LABEL(\@_serialise): - .subsection 1 - /* - * In the case that we might need to set SPEC_CTRL.IBRS for safety, we - * need to ensure that an attacker can't poison the `jz .L\@_skip_wrmsr` - * to speculate around the WRMSR. As a result, we need a dispatch - * serialising instruction in the else clause. - */ -.L\@_skip_wrmsr: - lfence - UNLIKELY_END(\@_serialise) -.endm - -/* Use when exiting to Xen in IST context. 
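The dispatch-serialising LFENCE in SPEC_CTRL_ENTRY_FROM_INTR_IST above guards the not-taken side of the `jz`; as a control-flow model only (hypothetical C, `flags` and `val` assumed, using Xen's usual wrmsrl() helper):

    if ( flags & SCF_ist_wrmsr )
        wrmsrl(MSR_SPEC_CTRL, val);             /* WRMSR serialises dispatch */
    else
        asm volatile ( "lfence" ::: "memory" ); /* stop speculation from
                                                   skipping the WRMSR */

Either path therefore ends in a dispatch-serialising event before dependent code can run with a stale MSR_SPEC_CTRL value.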
*/ -.macro SPEC_CTRL_EXIT_TO_XEN_IST -/* - * Requires %rbx=stack_end - * Clobbers %rax, %rcx, %rdx - */ - testb $SCF_ist_wrmsr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) - jz .L\@_skip - - DO_SPEC_CTRL_EXIT_TO_XEN - -.L\@_skip: -.endm - -#endif /* __ASSEMBLY__ */ -#endif /* !__X86_SPEC_CTRL_ASM_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h deleted file mode 100644 index 56f6095752..0000000000 --- a/xen/include/asm-x86/spinlock.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#define _raw_read_unlock(l) \ - BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \ - asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" ) - -/* - * On x86 the only reordering is of reads with older writes. In the - * lock case, the read in observe_head() can only be reordered with - * writes that precede it, and moving a write _into_ a locked section - * is OK. In the release case, the write in add_sized() can only be - * reordered with reads that follow it, and hoisting a read _into_ a - * locked region is OK. - */ -#define arch_lock_acquire_barrier() barrier() -#define arch_lock_release_barrier() barrier() - -#define arch_lock_relax() cpu_relax() -#define arch_lock_signal() -#define arch_lock_signal_wmb() \ -({ \ - smp_wmb(); \ - arch_lock_signal(); \ -}) - -#endif /* __ASM_SPINLOCK_H */ diff --git a/xen/include/asm-x86/string.h b/xen/include/asm-x86/string.h deleted file mode 100644 index f08d95096e..0000000000 --- a/xen/include/asm-x86/string.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __X86_STRING_H__ -#define __X86_STRING_H__ - -#endif /* __X86_STRING_H__ */ -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h deleted file mode 100644 index 65e63de69a..0000000000 --- a/xen/include/asm-x86/system.h +++ /dev/null @@ -1,295 +0,0 @@ -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H - -#include -#include -#include - -static inline void wbinvd(void) -{ - asm volatile ( "wbinvd" ::: "memory" ); -} - -static inline void wbnoinvd(void) -{ - asm volatile ( "repe; wbinvd" : : : "memory" ); -} - -static inline void clflush(const void *p) -{ - asm volatile ( "clflush %0" :: "m" (*(const char *)p) ); -} - -static inline void clflushopt(const void *p) -{ - asm volatile ( "data16 clflush %0" :: "m" (*(const char *)p) ); -} - -static inline void clwb(const void *p) -{ -#if defined(HAVE_AS_CLWB) - asm volatile ( "clwb %0" :: "m" (*(const char *)p) ); -#elif defined(HAVE_AS_XSAVEOPT) - asm volatile ( "data16 xsaveopt %0" :: "m" (*(const char *)p) ); -#else - asm volatile ( ".byte 0x66, 0x0f, 0xae, 0x32" - :: "d" (p), "m" (*(const char *)p) ); -#endif -} - -#define xchg(ptr,v) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) - -#include - -/* - * Note: no "lock" prefix even on SMP: xchg always implies lock anyway - * Note 2: xchg has side effect, so that attribute volatile is necessary, - * but generally the primitive is invalid, *ptr is output argument. 
--ANK - */ -static always_inline unsigned long __xchg( - unsigned long x, volatile void *ptr, int size) -{ - switch ( size ) - { - case 1: - asm volatile ( "xchg %b[x], %[ptr]" - : [x] "+q" (x), [ptr] "+m" (*(volatile uint8_t *)ptr) - :: "memory" ); - break; - case 2: - asm volatile ( "xchg %w[x], %[ptr]" - : [x] "+r" (x), [ptr] "+m" (*(volatile uint16_t *)ptr) - :: "memory" ); - break; - case 4: - asm volatile ( "xchg %k[x], %[ptr]" - : [x] "+r" (x), [ptr] "+m" (*(volatile uint32_t *)ptr) - :: "memory" ); - break; - case 8: - asm volatile ( "xchg %q[x], %[ptr]" - : [x] "+r" (x), [ptr] "+m" (*(volatile uint64_t *)ptr) - :: "memory" ); - break; - } - return x; -} - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - */ - -static always_inline unsigned long __cmpxchg( - volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - unsigned long prev; - switch ( size ) - { - case 1: - asm volatile ( "lock cmpxchg %b[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(volatile uint8_t *)ptr) - : [new] "q" (new), "a" (old) - : "memory" ); - return prev; - case 2: - asm volatile ( "lock cmpxchg %w[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(volatile uint16_t *)ptr) - : [new] "r" (new), "a" (old) - : "memory" ); - return prev; - case 4: - asm volatile ( "lock cmpxchg %k[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(volatile uint32_t *)ptr) - : [new] "r" (new), "a" (old) - : "memory" ); - return prev; - case 8: - asm volatile ( "lock cmpxchg %q[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(volatile uint64_t *)ptr) - : [new] "r" (new), "a" (old) - : "memory" ); - return prev; - } - return old; -} - -static always_inline unsigned long cmpxchg_local_( - void *ptr, unsigned long old, unsigned long new, unsigned int size) -{ - unsigned long prev = ~old; - - switch ( size ) - { - case 1: - asm volatile ( "cmpxchg %b[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr) - : [new] "q" (new), "a" (old) ); - break; - case 2: - asm volatile ( "cmpxchg %w[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr) - : [new] "r" (new), "a" (old) ); - break; - case 4: - asm volatile ( "cmpxchg %k[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr) - : [new] "r" (new), "a" (old) ); - break; - case 8: - asm volatile ( "cmpxchg %q[new], %[ptr]" - : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr) - : [new] "r" (new), "a" (old) ); - break; - } - - return prev; -} - -/* - * Undefined symbol to cause link failure if a wrong size is used with - * arch_fetch_and_add(). - */ -extern unsigned long __bad_fetch_and_add_size(void); - -static always_inline unsigned long __xadd( - volatile void *ptr, unsigned long v, int size) -{ - switch ( size ) - { - case 1: - asm volatile ( "lock xadd %b[v], %[ptr]" - : [v] "+q" (v), [ptr] "+m" (*(volatile uint8_t *)ptr) - :: "memory"); - return v; - case 2: - asm volatile ( "lock xadd %w[v], %[ptr]" - : [v] "+r" (v), [ptr] "+m" (*(volatile uint16_t *)ptr) - :: "memory"); - return v; - case 4: - asm volatile ( "lock xadd %k[v], %[ptr]" - : [v] "+r" (v), [ptr] "+m" (*(volatile uint32_t *)ptr) - :: "memory"); - return v; - case 8: - asm volatile ( "lock xadd %q[v], %[ptr]" - : [v] "+r" (v), [ptr] "+m" (*(volatile uint64_t *)ptr) - :: "memory"); - - return v; - default: - return __bad_fetch_and_add_size(); - } -} - -/* - * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr. Returns - * the previous value. 
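As a usage sketch for the size-dispatching helpers above (calling __cmpxchg() directly for clarity; `counter` is an assumed shared unsigned long, and real callers normally go through a cmpxchg() wrapper), the classic optimistic-update loop:

    unsigned long cur = ACCESS_ONCE(counter), old;

    do {
        old = cur;
        cur = __cmpxchg(&counter, old, old + 1, sizeof(counter));
    } while ( cur != old );     /* retry if another CPU won the race */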
- * - * This is a full memory barrier. - */ -#define arch_fetch_and_add(ptr, v) \ - ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr)))) - -/* - * Mandatory barriers, for enforced ordering of reads and writes, e.g. for use - * with MMIO devices mapped with reduced cacheability. - */ -#define mb() asm volatile ( "mfence" ::: "memory" ) -#define rmb() asm volatile ( "lfence" ::: "memory" ) -#define wmb() asm volatile ( "sfence" ::: "memory" ) - -/* - * SMP barriers, for ordering of reads and writes between CPUs, most commonly - * used with shared memory. - * - * Both Intel and AMD agree that, from a programmer's viewpoint: - * Loads cannot be reordered relative to other loads. - * Stores cannot be reordered relative to other stores. - * Loads may be reordered ahead of a unaliasing stores. - * - * Refer to the vendor system programming manuals for further details. - */ -#define smp_mb() asm volatile ( "lock addl $0, -4(%%rsp)" ::: "memory" ) -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0) - -#define smp_mb__before_atomic() do { } while (0) -#define smp_mb__after_atomic() do { } while (0) - -/** - * array_index_mask_nospec() - generate a mask that is ~0UL when the - * bounds check succeeds and 0 otherwise - * @index: array element index - * @size: number of elements in array - * - * Returns: - * 0 - (index < size) - */ -static inline unsigned long array_index_mask_nospec(unsigned long index, - unsigned long size) -{ - unsigned long mask; - - asm volatile ( "cmp %[size], %[index]; sbb %[mask], %[mask];" - : [mask] "=r" (mask) - : [size] "g" (size), [index] "r" (index) ); - - return mask; -} - -/* Override default implementation in nospec.h. */ -#define array_index_mask_nospec array_index_mask_nospec - -#define local_irq_disable() asm volatile ( "cli" : : : "memory" ) -#define local_irq_enable() asm volatile ( "sti" : : : "memory" ) - -/* used in the idle loop; sti takes one instruction cycle to complete */ -#define safe_halt() asm volatile ( "sti; hlt" : : : "memory" ) -/* used when interrupts are already enabled or to shutdown the processor */ -#define halt() asm volatile ( "hlt" : : : "memory" ) - -#define local_save_flags(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \ -}) -#define local_irq_save(x) \ -({ \ - local_save_flags(x); \ - local_irq_disable(); \ -}) -#define local_irq_restore(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( "pushfq\n\t" \ - "andq %0, (%%rsp)\n\t" \ - "orq %1, (%%rsp)\n\t" \ - "popfq" \ - : : "i?r" ( ~X86_EFLAGS_IF ), \ - "ri" ( (x) & X86_EFLAGS_IF ) ); \ -}) - -static inline int local_irq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !!(flags & X86_EFLAGS_IF); -} - -#define BROKEN_ACPI_Sx 0x0001 -#define BROKEN_INIT_AFTER_S1 0x0002 - -void trap_init(void); -void init_idt_traps(void); -void load_system_tables(void); -void percpu_traps_init(void); -void subarch_percpu_traps_init(void); - -#endif diff --git a/xen/include/asm-x86/tboot.h b/xen/include/asm-x86/tboot.h deleted file mode 100644 index bfeed1542f..0000000000 --- a/xen/include/asm-x86/tboot.h +++ /dev/null @@ -1,160 +0,0 @@ -/* - * tboot.h: shared data structure with MLE and kernel and functions - * used by kernel for runtime support - * - * Copyright (c) 2006-2007, Intel Corporation - * All rights reserved. 
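A typical use of array_index_mask_nospec() above, sketched with hypothetical table/index names:

    /*
     * Clamp a guest-controlled index even under speculation: the mask is
     * ~0UL only when idx < ARRAY_SIZE(table), so a mispredicted bounds
     * check reads element 0 rather than attacker-chosen memory.
     */
    idx &= array_index_mask_nospec(idx, ARRAY_SIZE(table));
    val = table[idx];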
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of the Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef __TBOOT_H__ -#define __TBOOT_H__ - -#include - -typedef struct __packed { - uint32_t data1; - uint16_t data2; - uint16_t data3; - uint16_t data4; - uint8_t data5[6]; -} uuid_t; - -/* used to communicate between tboot and the launched kernel (i.e. 
Xen) */ - -#define TB_KEY_SIZE 64 /* 512 bits */ - -#define MAX_TB_MAC_REGIONS 32 -typedef struct __packed { - uint64_t start; /* must be 64 byte -aligned */ - uint32_t size; /* must be 64 byte -granular */ -} tboot_mac_region_t; - -/* GAS - Generic Address Structure (ACPI 2.0+) */ -typedef struct __packed { - uint8_t space_id; - uint8_t bit_width; - uint8_t bit_offset; - uint8_t access_width; - uint64_t address; -} tboot_acpi_generic_address_t; - -typedef struct __packed { - tboot_acpi_generic_address_t pm1a_cnt_blk; - tboot_acpi_generic_address_t pm1b_cnt_blk; - tboot_acpi_generic_address_t pm1a_evt_blk; - tboot_acpi_generic_address_t pm1b_evt_blk; - uint16_t pm1a_cnt_val; - uint16_t pm1b_cnt_val; - uint64_t wakeup_vector; - uint32_t vector_width; - uint64_t kernel_s3_resume_vector; -} tboot_acpi_sleep_info_t; - -typedef struct __packed { - /* version 3+ fields: */ - uuid_t uuid; /* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */ - uint32_t version; /* Version number; currently supports 0.6 */ - uint32_t log_addr; /* physical addr of tb_log_t log */ - uint32_t shutdown_entry; /* entry point for tboot shutdown */ - uint32_t shutdown_type; /* type of shutdown (TB_SHUTDOWN_*) */ - tboot_acpi_sleep_info_t - acpi_sinfo; /* where kernel put acpi sleep info in Sx */ - uint32_t tboot_base; /* starting addr for tboot */ - uint32_t tboot_size; /* size of tboot */ - uint8_t num_mac_regions; /* number mem regions to MAC on S3 */ - /* contig regions memory to MAC on S3 */ - tboot_mac_region_t mac_regions[MAX_TB_MAC_REGIONS]; - /* version 4+ fields: */ - /* populated by tboot; will be encrypted */ - uint8_t s3_key[TB_KEY_SIZE]; - /* version 5+ fields: */ - uint8_t reserved_align[3]; /* used to 4byte-align num_in_wfs */ - uint32_t num_in_wfs; /* number of processors in wait-for-SIPI */ - /* version 6+ fields: */ - uint32_t flags; - uint64_t ap_wake_addr; /* phys addr of kernel/VMM SIPI vector */ - uint32_t ap_wake_trigger; /* kernel/VMM writes APIC ID to wake AP */ -} tboot_shared_t; - -#define TB_SHUTDOWN_REBOOT 0 -#define TB_SHUTDOWN_S5 1 -#define TB_SHUTDOWN_S4 2 -#define TB_SHUTDOWN_S3 3 -#define TB_SHUTDOWN_HALT 4 - -#define TB_FLAG_AP_WAKE_SUPPORT 0x00000001 /* kernel/VMM use INIT-SIPI-SIPI - if clear, ap_wake_* if set */ - -/* {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} */ -#define TBOOT_SHARED_UUID { 0x663c8dff, 0xe8b3, 0x4b82, 0xaabf, \ - { 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8 } }; - -extern tboot_shared_t *g_tboot_shared; - -#ifdef CONFIG_TBOOT -void tboot_probe(void); -void tboot_shutdown(uint32_t shutdown_type); -int tboot_in_measured_env(void); -int tboot_protect_mem_regions(void); -int tboot_parse_dmar_table(acpi_table_handler dmar_handler); -int tboot_s3_resume(void); -void tboot_s3_error(int error); -int tboot_wake_ap(int apicid, unsigned long sipi_vec); -#else -static inline void tboot_probe(void) {} -static inline void tboot_shutdown(uint32_t shutdown_type) {} -static inline int tboot_in_measured_env(void) { return 0; } -static inline int tboot_protect_mem_regions(void) { return 1; } - -static inline int tboot_parse_dmar_table(acpi_table_handler dmar_handler) -{ - return acpi_table_parse(ACPI_SIG_DMAR, dmar_handler); -} - -static inline int tboot_s3_resume(void) { return 0; } -static inline void tboot_s3_error(int error) {} -static inline int tboot_wake_ap(int apicid, unsigned long sipi_vec) -{ - return 1; -} -#endif /* CONFIG_TBOOT */ - -#endif /* __TBOOT_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - 
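A hedged sketch of how the launched kernel/VMM consumes the interface above, using the accessors declared earlier rather than poking tboot_shared_t fields directly:

    /* Hypothetical shutdown path: if tboot launched us, let it tear the
     * measured environment down instead of halting directly. */
    if ( tboot_in_measured_env() )
        tboot_shutdown(TB_SHUTDOWN_HALT);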
*/ diff --git a/xen/include/asm-x86/time.h b/xen/include/asm-x86/time.h deleted file mode 100644 index f347311cc4..0000000000 --- a/xen/include/asm-x86/time.h +++ /dev/null @@ -1,76 +0,0 @@ - -#ifndef __X86_TIME_H__ -#define __X86_TIME_H__ - -#include - -/* - * PV TSC emulation modes: - * 0 = guest rdtsc/p executed natively when monotonicity can be guaranteed - * and emulated otherwise (with frequency scaled if necessary) - * 1 = guest rdtsc/p always emulated at 1GHz (kernel and user) - * 2 = guest rdtsc always executed natively (no monotonicity/frequency - * guarantees); guest rdtscp emulated at native frequency if - * unsupported by h/w, else executed natively - * 3 = Removed, was PVRDTSCP. - */ -#define TSC_MODE_DEFAULT 0 -#define TSC_MODE_ALWAYS_EMULATE 1 -#define TSC_MODE_NEVER_EMULATE 2 - -typedef u64 cycles_t; - -extern bool disable_tsc_sync; - -static inline cycles_t get_cycles(void) -{ - return rdtsc_ordered(); -} - -unsigned long -mktime (unsigned int year, unsigned int mon, - unsigned int day, unsigned int hour, - unsigned int min, unsigned int sec); - -int time_suspend(void); -int time_resume(void); - -void init_percpu_time(void); -void time_latch_stamps(void); - -struct ioreq; -int hwdom_pit_access(struct ioreq *ioreq); - -int cpu_frequency_change(u64 freq); - -void pit_broadcast_enter(void); -void pit_broadcast_exit(void); -int pit_broadcast_is_available(void); - -uint64_t acpi_pm_tick_to_ns(uint64_t ticks); -uint64_t ns_to_acpi_pm_tick(uint64_t ns); - -uint64_t tsc_ticks2ns(uint64_t ticks); - -uint64_t pv_soft_rdtsc(const struct vcpu *v, const struct cpu_user_regs *regs); -u64 gtime_to_gtsc(struct domain *d, u64 time); -u64 gtsc_to_gtime(struct domain *d, u64 tsc); - -int tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec, - uint32_t gtsc_khz, uint32_t incarnation); - -void tsc_get_info(struct domain *d, uint32_t *tsc_mode, uint64_t *elapsed_nsec, - uint32_t *gtsc_khz, uint32_t *incarnation); - - -void force_update_vcpu_system_time(struct vcpu *v); - -bool clocksource_is_tsc(void); -int host_tsc_is_safe(void); -u64 stime2tsc(s_time_t stime); - -struct time_scale; -void set_time_scale(struct time_scale *ts, u64 ticks_per_sec); -u64 scale_delta(u64 delta, const struct time_scale *scale); - -#endif /* __X86_TIME_H__ */ diff --git a/xen/include/asm-x86/trace.h b/xen/include/asm-x86/trace.h deleted file mode 100644 index edef1bb099..0000000000 --- a/xen/include/asm-x86/trace.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASM_TRACE_H__ -#define __ASM_TRACE_H__ - -#endif /* __ASM_TRACE_H__ */ diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h deleted file mode 100644 index ec23d3a70b..0000000000 --- a/xen/include/asm-x86/traps.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2007, 2008 Advanced Micro Devices, Inc. - * Author: Christoph Egger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see . 
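For the scaling helpers at the bottom of time.h above, a hedged sketch of timing a code section (assumes the full struct time_scale definition is in scope and that a ticks-per-second figure such as Xen's cpu_khz * 1000 is at hand):

    struct time_scale ts;
    uint64_t t0, t1, ns;

    set_time_scale(&ts, cpu_khz * 1000ULL); /* calibrate: TSC ticks/sec */
    t0 = get_cycles();
    /* ... section being timed ... */
    t1 = get_cycles();
    ns = scale_delta(t1 - t0, &ts);         /* elapsed nanoseconds */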
- */ - -#ifndef ASM_TRAP_H -#define ASM_TRAP_H - -const char *trapstr(unsigned int trapnr); - -#endif /* ASM_TRAP_H */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/types.h b/xen/include/asm-x86/types.h deleted file mode 100644 index 7817132048..0000000000 --- a/xen/include/asm-x86/types.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef __X86_TYPES_H__ -#define __X86_TYPES_H__ - -#ifndef __ASSEMBLY__ - -typedef __signed__ char __s8; -typedef unsigned char __u8; - -typedef __signed__ short __s16; -typedef unsigned short __u16; - -typedef __signed__ int __s32; -typedef unsigned int __u32; - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) -typedef __signed__ long __s64; -typedef unsigned long __u64; -#endif - -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -typedef signed long s64; -typedef unsigned long u64; -typedef unsigned long paddr_t; -#define INVALID_PADDR (~0UL) -#define PRIpaddr "016lx" - -#if defined(__SIZE_TYPE__) -typedef __SIZE_TYPE__ size_t; -#else -typedef unsigned long size_t; -#endif -typedef signed long ssize_t; - -#if defined(__PTRDIFF_TYPE__) -typedef __PTRDIFF_TYPE__ ptrdiff_t; -#else -typedef signed long ptrdiff_t; -#endif - -#endif /* __ASSEMBLY__ */ - -#endif /* __X86_TYPES_H__ */ diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h deleted file mode 100644 index 684fccd95c..0000000000 --- a/xen/include/asm-x86/uaccess.h +++ /dev/null @@ -1,429 +0,0 @@ - -#ifndef __X86_UACCESS_H__ -#define __X86_UACCESS_H__ - -#include -#include -#include -#include - -#include - -unsigned int copy_to_guest_pv(void __user *to, const void *from, - unsigned int len); -unsigned int clear_guest_pv(void __user *to, unsigned int len); -unsigned int copy_from_guest_pv(void *to, const void __user *from, - unsigned int len); - -/* Handles exceptions in both to and from, but doesn't do access_ok */ -unsigned int copy_to_guest_ll(void __user*to, const void *from, unsigned int n); -unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n); -unsigned int copy_to_unsafe_ll(void *to, const void *from, unsigned int n); -unsigned int copy_from_unsafe_ll(void *to, const void *from, unsigned int n); - -extern long __get_user_bad(void); -extern void __put_user_bad(void); - -#define UA_KEEP(args...) args -#define UA_DROP(args...) - -/** - * get_guest: - Get a simple variable from guest space. - * @x: Variable to store result. - * @ptr: Source address, in guest space. - * - * This macro load a single simple variable from guest space. - * It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Returns zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. - */ -#define get_guest(x, ptr) get_guest_check(x, ptr, sizeof(*(ptr))) - -/** - * put_guest: - Write a simple value into guest space. - * @x: Value to store in guest space. - * @ptr: Destination address, in guest space. - * - * This macro stores a single simple value from to guest space. - * It supports simple types like char and int, but not larger - * data types like structures or arrays. 
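Usage of the checked accessors documented above is uniform; an illustrative fragment (`uptr` and the update are hypothetical):

    unsigned long val;

    if ( get_guest(val, uptr) )   /* access_ok() check plus copy */
        return -EFAULT;           /* val has been zeroed on failure */

    val |= 0x1;                   /* hypothetical update */

    if ( put_guest(val, uptr) )
        return -EFAULT;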
- * - * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. - * - * Returns zero on success, or -EFAULT on error. - */ -#define put_guest(x, ptr) \ - put_guest_check((__typeof__(*(ptr)))(x), ptr, sizeof(*(ptr))) - -/** - * __get_guest: - Get a simple variable from guest space, with less checking. - * @x: Variable to store result. - * @ptr: Source address, in guest space. - * - * This macro copies a single simple variable from guest space to hypervisor - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Caller must check the pointer with access_ok() before calling this - * function. - * - * Returns zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. - */ -#define __get_guest(x, ptr) get_guest_nocheck(x, ptr, sizeof(*(ptr))) - -/** - * __put_guest: - Write a simple value into guest space, with less checking. - * @x: Value to store in guest space. - * @ptr: Destination address, in guest space. - * - * This macro copies a single simple value from hypervisor space to guest - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. - * - * Caller must check the pointer with access_ok() before calling this - * function. - * - * Returns zero on success, or -EFAULT on error. - */ -#define __put_guest(x, ptr) \ - put_guest_nocheck((__typeof__(*(ptr)))(x), ptr, sizeof(*(ptr))) - -#define put_unsafe(x, ptr) \ -({ \ - int err_; \ - put_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\ - err_; \ -}) - -#define put_guest_nocheck(x, ptr, size) \ -({ \ - int err_; \ - put_guest_size(x, ptr, size, err_, -EFAULT); \ - err_; \ -}) - -#define put_guest_check(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __user *ptr_ = (ptr); \ - __typeof__(size) size_ = (size); \ - access_ok(ptr_, size_) ? put_guest_nocheck(x, ptr_, size_) \ - : -EFAULT; \ -}) - -#define get_unsafe(x, ptr) \ -({ \ - int err_; \ - get_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\ - err_; \ -}) - -#define get_guest_nocheck(x, ptr, size) \ -({ \ - int err_; \ - get_guest_size(x, ptr, size, err_, -EFAULT); \ - err_; \ -}) - -#define get_guest_check(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __user *ptr_ = (ptr); \ - __typeof__(size) size_ = (size); \ - access_ok(ptr_, size_) ? get_guest_nocheck(x, ptr_, size_) \ - : -EFAULT; \ -}) - -struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(const struct __large_struct *)(x)) - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. 
- */ -#define put_unsafe_asm(x, addr, GUARD, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - GUARD( \ - " guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \ - ) \ - "1: mov"itype" %"rtype"[val], (%[ptr])\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %[errno], %[ret]\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : [ret] "+r" (err), [ptr] "=&r" (dummy_) \ - GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)) \ - : [val] ltype (x), "m" (__m(addr)), \ - "[ptr]" (addr), [errno] "i" (errret)) - -#define get_unsafe_asm(x, addr, GUARD, err, rtype, ltype, errret) \ - __asm__ __volatile__( \ - GUARD( \ - " guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \ - ) \ - "1: mov (%[ptr]), %"rtype"[val]\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %[errno], %[ret]\n" \ - " xor %k[val], %k[val]\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : [ret] "+r" (err), [val] ltype (x), \ - [ptr] "=&r" (dummy_) \ - GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)) \ - : "m" (__m(addr)), "[ptr]" (addr), \ - [errno] "i" (errret)) - -#define put_unsafe_size(x, ptr, size, grd, retval, errret) \ -do { \ - retval = 0; \ - stac(); \ - switch ( size ) \ - { \ - long dummy_; \ - case 1: \ - put_unsafe_asm(x, ptr, grd, retval, "b", "b", "iq", errret); \ - break; \ - case 2: \ - put_unsafe_asm(x, ptr, grd, retval, "w", "w", "ir", errret); \ - break; \ - case 4: \ - put_unsafe_asm(x, ptr, grd, retval, "l", "k", "ir", errret); \ - break; \ - case 8: \ - put_unsafe_asm(x, ptr, grd, retval, "q", "", "ir", errret); \ - break; \ - default: __put_user_bad(); \ - } \ - clac(); \ -} while ( false ) - -#define put_guest_size(x, ptr, size, retval, errret) \ - put_unsafe_size(x, ptr, size, UA_KEEP, retval, errret) - -#define get_unsafe_size(x, ptr, size, grd, retval, errret) \ -do { \ - retval = 0; \ - stac(); \ - switch ( size ) \ - { \ - long dummy_; \ - case 1: get_unsafe_asm(x, ptr, grd, retval, "b", "=q", errret); break; \ - case 2: get_unsafe_asm(x, ptr, grd, retval, "w", "=r", errret); break; \ - case 4: get_unsafe_asm(x, ptr, grd, retval, "k", "=r", errret); break; \ - case 8: get_unsafe_asm(x, ptr, grd, retval, "", "=r", errret); break; \ - default: __get_user_bad(); \ - } \ - clac(); \ -} while ( false ) - -#define get_guest_size(x, ptr, size, retval, errret) \ - get_unsafe_size(x, ptr, size, UA_KEEP, retval, errret) - -/** - * __copy_to_guest_pv: - Copy a block of data into guest space, with less - * checking - * @to: Destination address, in guest space. - * @from: Source address, in hypervisor space. - * @n: Number of bytes to copy. - * - * Copy data from hypervisor space to guest space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. 
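A hedged caller-side sketch matching the contract described above (`uptr` and `nat` are hypothetical):

    if ( !access_ok(uptr, sizeof(nat)) )
        return -EFAULT;

    if ( __copy_to_guest_pv(uptr, &nat, sizeof(nat)) )
        return -EFAULT;   /* non-zero = number of bytes not copied */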
- */ -static always_inline unsigned long -__copy_to_guest_pv(void __user *to, const void *from, unsigned long n) -{ - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - put_guest_size(*(const uint8_t *)from, to, 1, ret, 1); - return ret; - case 2: - put_guest_size(*(const uint16_t *)from, to, 2, ret, 2); - return ret; - case 4: - put_guest_size(*(const uint32_t *)from, to, 4, ret, 4); - return ret; - case 8: - put_guest_size(*(const uint64_t *)from, to, 8, ret, 8); - return ret; - } - } - return copy_to_guest_ll(to, from, n); -} - -/** - * __copy_from_guest_pv: - Copy a block of data from guest space, with less - * checking - * @to: Destination address, in hypervisor space. - * @from: Source address, in guest space. - * @n: Number of bytes to copy. - * - * Copy data from guest space to hypervisor space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -static always_inline unsigned long -__copy_from_guest_pv(void *to, const void __user *from, unsigned long n) -{ - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - get_guest_size(*(uint8_t *)to, from, 1, ret, 1); - return ret; - case 2: - get_guest_size(*(uint16_t *)to, from, 2, ret, 2); - return ret; - case 4: - get_guest_size(*(uint32_t *)to, from, 4, ret, 4); - return ret; - case 8: - get_guest_size(*(uint64_t *)to, from, 8, ret, 8); - return ret; - } - } - return copy_from_guest_ll(to, from, n); -} - -/** - * copy_to_unsafe: - Copy a block of data to unsafe space, with exception - * checking - * @to: Unsafe destination address. - * @from: Safe source address, in hypervisor space. - * @n: Number of bytes to copy. - * - * Copy data from hypervisor space to a potentially unmapped area. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static always_inline unsigned int -copy_to_unsafe(void __user *to, const void *from, unsigned int n) -{ - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - put_unsafe_size(*(const uint8_t *)from, to, 1, UA_DROP, ret, 1); - return ret; - case 2: - put_unsafe_size(*(const uint16_t *)from, to, 2, UA_DROP, ret, 2); - return ret; - case 4: - put_unsafe_size(*(const uint32_t *)from, to, 4, UA_DROP, ret, 4); - return ret; - case 8: - put_unsafe_size(*(const uint64_t *)from, to, 8, UA_DROP, ret, 8); - return ret; - } - } - - return copy_to_unsafe_ll(to, from, n); -} - -/** - * copy_from_unsafe: - Copy a block of data from unsafe space, with exception - * checking - * @to: Safe destination address, in hypervisor space. - * @from: Unsafe source address. - * @n: Number of bytes to copy. - * - * Copy data from a potentially unmapped area space to hypervisor space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
- */ -static always_inline unsigned int -copy_from_unsafe(void *to, const void __user *from, unsigned int n) -{ - if ( __builtin_constant_p(n) ) - { - unsigned long ret; - - switch ( n ) - { - case 1: - get_unsafe_size(*(uint8_t *)to, from, 1, UA_DROP, ret, 1); - return ret; - case 2: - get_unsafe_size(*(uint16_t *)to, from, 2, UA_DROP, ret, 2); - return ret; - case 4: - get_unsafe_size(*(uint32_t *)to, from, 4, UA_DROP, ret, 4); - return ret; - case 8: - get_unsafe_size(*(uint64_t *)to, from, 8, UA_DROP, ret, 8); - return ret; - } - } - - return copy_from_unsafe_ll(to, from, n); -} - -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - s32 addr, cont; -}; -extern struct exception_table_entry __start___ex_table[]; -extern struct exception_table_entry __stop___ex_table[]; -extern struct exception_table_entry __start___pre_ex_table[]; -extern struct exception_table_entry __stop___pre_ex_table[]; - -union stub_exception_token { - struct { - uint16_t ec; - uint8_t trapnr; - } fields; - unsigned long raw; -}; - -extern unsigned long search_exception_table(const struct cpu_user_regs *regs); -extern void sort_exception_tables(void); -extern void sort_exception_table(struct exception_table_entry *start, - const struct exception_table_entry *stop); - -#endif /* __X86_UACCESS_H__ */ diff --git a/xen/include/asm-x86/unaligned.h b/xen/include/asm-x86/unaligned.h deleted file mode 100644 index 6070801d4a..0000000000 --- a/xen/include/asm-x86/unaligned.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_UNALIGNED_H__ -#define __ASM_UNALIGNED_H__ - -#include - -#endif /* __ASM_UNALIGNED_H__ */ diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h deleted file mode 100644 index 0756124075..0000000000 --- a/xen/include/asm-x86/vm_event.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * vm_event.h: architecture specific vm_event handling routines - * - * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - */ - -#ifndef __ASM_X86_VM_EVENT_H__ -#define __ASM_X86_VM_EVENT_H__ - -#include -#include - -/* - * Should we emulate the next matching instruction on VCPU resume - * after a vm_event? 
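For union stub_exception_token earlier in uaccess.h, a small illustrative decode (assuming the faulting stub's token was captured into `raw`):

    union stub_exception_token tok = { .raw = raw };

    if ( tok.raw )
        printk("stub fault: vector %u, error code %#x\n",
               tok.fields.trapnr, tok.fields.ec);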
- */ -struct arch_vm_event { - uint32_t emulate_flags; - union { - struct vm_event_emul_read_data read; - struct vm_event_emul_insn_data insn; - } emul; - struct monitor_write_data write_data; - struct vm_event_regs_x86 gprs; - bool set_gprs; - /* A sync vm_event has been sent and we're not done handling it. */ - bool sync_event; - /* Send mem access events from emulator */ - bool send_event; -}; - -int vm_event_init_domain(struct domain *d); - -void vm_event_cleanup_domain(struct domain *d); - -void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v, - vm_event_response_t *rsp); - -void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp); - -void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp); - -void vm_event_sync_event(struct vcpu *v, bool value); - -void vm_event_reset_vmtrace(struct vcpu *v); - -#endif /* __ASM_X86_VM_EVENT_H__ */ diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h deleted file mode 100644 index e5709bd44a..0000000000 --- a/xen/include/asm-x86/vpmu.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * vpmu.h: PMU virtualization for HVM domain. - * - * Copyright (c) 2007, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; If not, see . - * - * Author: Haitao Shan - */ - -#ifndef __ASM_X86_HVM_VPMU_H_ -#define __ASM_X86_HVM_VPMU_H_ - -#include - -#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu) -#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu) -#define vpmu_available(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE) - -#define MSR_TYPE_COUNTER 0 -#define MSR_TYPE_CTRL 1 -#define MSR_TYPE_GLOBAL 2 -#define MSR_TYPE_ARCH_COUNTER 3 -#define MSR_TYPE_ARCH_CTRL 4 - -/* Start of PMU register bank */ -#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \ - (uintptr_t)ctxt->offset)) - -/* Arch specific operations shared by all vpmus */ -struct arch_vpmu_ops { - int (*initialise)(struct vcpu *v); - int (*do_wrmsr)(unsigned int msr, uint64_t msr_content); - int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content); - int (*do_interrupt)(struct cpu_user_regs *regs); - void (*arch_vpmu_destroy)(struct vcpu *v); - int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest); - int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest); - void (*arch_vpmu_dump)(const struct vcpu *); -}; - -const struct arch_vpmu_ops *core2_vpmu_init(void); -const struct arch_vpmu_ops *amd_vpmu_init(void); -const struct arch_vpmu_ops *hygon_vpmu_init(void); - -struct vpmu_struct { - u32 flags; - u32 last_pcpu; - u32 hw_lapic_lvtpc; - void *context; /* May be shared with PV guest */ - void *priv_context; /* hypervisor-only */ - struct xen_pmu_data *xenpmu_data; - spinlock_t vpmu_lock; -}; - -/* VPMU states */ -#define VPMU_INITIALIZED 0x0001 -#define VPMU_CONTEXT_ALLOCATED 0x0002 -#define VPMU_CONTEXT_LOADED 0x0004 -#define VPMU_RUNNING 0x0008 -#define VPMU_CONTEXT_SAVE 0x0010 /* Force context save */ -#define VPMU_FROZEN 0x0020 /* Stop counters while VCPU is not running */ -#define 
VPMU_PASSIVE_DOMAIN_ALLOCATED 0x0040 -/* PV(H) guests: VPMU registers are accessed by guest from shared page */ -#define VPMU_CACHED 0x0080 -#define VPMU_AVAILABLE 0x0100 - -/* Intel-specific VPMU features */ -#define VPMU_CPU_HAS_DS 0x1000 /* Has Debug Store */ -#define VPMU_CPU_HAS_BTS 0x2000 /* Has Branch Trace Store */ - -static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask) -{ - vpmu->flags |= mask; -} -static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask) -{ - vpmu->flags &= ~mask; -} -static inline void vpmu_clear(struct vpmu_struct *vpmu) -{ - /* VPMU_AVAILABLE should be altered by get/put_vpmu(). */ - vpmu->flags &= VPMU_AVAILABLE; -} -static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask) -{ - return !!(vpmu->flags & mask); -} -static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu, - const u32 mask) -{ - return !!((vpmu->flags & mask) == mask); -} - -void vpmu_lvtpc_update(uint32_t val); -int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, bool is_write); -void vpmu_do_interrupt(struct cpu_user_regs *regs); -void vpmu_initialise(struct vcpu *v); -void vpmu_destroy(struct vcpu *v); -void vpmu_save(struct vcpu *v); -int vpmu_load(struct vcpu *v, bool_t from_guest); -void vpmu_dump(struct vcpu *v); - -static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) -{ - return vpmu_do_msr(msr, &msr_content, true /* write */); -} -static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) -{ - return vpmu_do_msr(msr, msr_content, false /* read */); -} - -extern unsigned int vpmu_mode; -extern unsigned int vpmu_features; - -/* Context switch */ -static inline void vpmu_switch_from(struct vcpu *prev) -{ - if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) - vpmu_save(prev); -} - -static inline void vpmu_switch_to(struct vcpu *next) -{ - if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) - vpmu_load(next, 0); -} - -#endif /* __ASM_X86_HVM_VPMU_H_*/ - diff --git a/xen/include/asm-x86/x86-defns.h b/xen/include/asm-x86/x86-defns.h deleted file mode 100644 index 28628807cb..0000000000 --- a/xen/include/asm-x86/x86-defns.h +++ /dev/null @@ -1,156 +0,0 @@ -#ifndef __XEN_X86_DEFNS_H__ -#define __XEN_X86_DEFNS_H__ - -/* - * EFLAGS bits - */ -#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ -#define X86_EFLAGS_MBS 0x00000002 /* Resvd bit */ -#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ -#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ -#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ -#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ -#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ -#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ -#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ -#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ -#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ -#define X86_EFLAGS_NT 0x00004000 /* Nested Task */ -#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ -#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ -#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ -#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ -#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ -#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ - -#define X86_EFLAGS_ARITH_MASK \ - (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \ - X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF) - -/* - * Intel CPU flags in CR0 - */ -#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */ -#define X86_CR0_MP 0x00000002 /* Monitor 
Coprocessor (RW) */ -#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */ -#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */ -#define X86_CR0_ET 0x00000010 /* Extension type (RO) */ -#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */ -#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */ -#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */ -#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */ -#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */ -#define X86_CR0_PG 0x80000000 /* Paging (RW) */ - -/* - * Intel CPU flags in CR3 - */ -#define X86_CR3_NOFLUSH (_AC(1, ULL) << 63) -#define X86_CR3_ADDR_MASK (PAGE_MASK & PADDR_MASK) -#define X86_CR3_PCID_MASK _AC(0x0fff, ULL) /* Mask for PCID */ - -/* - * Intel CPU features in CR4 - */ -#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */ -#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */ -#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */ -#define X86_CR4_DE 0x00000008 /* enable debugging extensions */ -#define X86_CR4_PSE 0x00000010 /* enable page size extensions */ -#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */ -#define X86_CR4_MCE 0x00000040 /* Machine check enable */ -#define X86_CR4_PGE 0x00000080 /* enable global pages */ -#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */ -#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ -#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ -#define X86_CR4_UMIP 0x00000800 /* enable UMIP */ -#define X86_CR4_LA57 0x00001000 /* enable 5-level paging */ -#define X86_CR4_VMXE 0x00002000 /* enable VMX */ -#define X86_CR4_SMXE 0x00004000 /* enable SMX */ -#define X86_CR4_FSGSBASE 0x00010000 /* enable {rd,wr}{fs,gs}base */ -#define X86_CR4_PCIDE 0x00020000 /* enable PCID */ -#define X86_CR4_OSXSAVE 0x00040000 /* enable XSAVE/XRSTOR */ -#define X86_CR4_SMEP 0x00100000 /* enable SMEP */ -#define X86_CR4_SMAP 0x00200000 /* enable SMAP */ -#define X86_CR4_PKE 0x00400000 /* enable PKE */ -#define X86_CR4_CET 0x00800000 /* Control-flow Enforcement Technology */ - -/* - * XSTATE component flags in XCR0 - */ -#define X86_XCR0_FP_POS 0 -#define X86_XCR0_FP (1ULL << X86_XCR0_FP_POS) -#define X86_XCR0_SSE_POS 1 -#define X86_XCR0_SSE (1ULL << X86_XCR0_SSE_POS) -#define X86_XCR0_YMM_POS 2 -#define X86_XCR0_YMM (1ULL << X86_XCR0_YMM_POS) -#define X86_XCR0_BNDREGS_POS 3 -#define X86_XCR0_BNDREGS (1ULL << X86_XCR0_BNDREGS_POS) -#define X86_XCR0_BNDCSR_POS 4 -#define X86_XCR0_BNDCSR (1ULL << X86_XCR0_BNDCSR_POS) -#define X86_XCR0_OPMASK_POS 5 -#define X86_XCR0_OPMASK (1ULL << X86_XCR0_OPMASK_POS) -#define X86_XCR0_ZMM_POS 6 -#define X86_XCR0_ZMM (1ULL << X86_XCR0_ZMM_POS) -#define X86_XCR0_HI_ZMM_POS 7 -#define X86_XCR0_HI_ZMM (1ULL << X86_XCR0_HI_ZMM_POS) -#define X86_XCR0_PKRU_POS 9 -#define X86_XCR0_PKRU (1ULL << X86_XCR0_PKRU_POS) -#define X86_XCR0_LWP_POS 62 -#define X86_XCR0_LWP (1ULL << X86_XCR0_LWP_POS) - -/* - * Debug status flags in DR6. - */ -#define X86_DR6_DEFAULT 0xffff0ff0 /* Default %dr6 value. */ - -/* - * Debug control flags in DR7. - */ -#define X86_DR7_DEFAULT 0x00000400 /* Default %dr7 value. */ - -/* - * Invalidation types for the INVPCID instruction. 
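As a worked example of combining the XSTATE bits defined above (an illustrative value, not a recommendation):

    /* An XCR0 covering x87, SSE and AVX state: FP must always be set,
     * and SSE is a prerequisite for YMM. */
    uint64_t xcr0 = X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM;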
- */ -#define X86_INVPCID_INDIV_ADDR 0 -#define X86_INVPCID_SINGLE_CTXT 1 -#define X86_INVPCID_ALL_INCL_GLOBAL 2 -#define X86_INVPCID_ALL_NON_GLOBAL 3 - -#define X86_NR_VECTORS 256 - -/* Exception Vectors */ -#define X86_EXC_DE 0 /* Divide Error */ -#define X86_EXC_DB 1 /* Debug Exception */ -#define X86_EXC_NMI 2 /* NMI */ -#define X86_EXC_BP 3 /* Breakpoint */ -#define X86_EXC_OF 4 /* Overflow */ -#define X86_EXC_BR 5 /* BOUND Range */ -#define X86_EXC_UD 6 /* Invalid Opcode */ -#define X86_EXC_NM 7 /* Device Not Available */ -#define X86_EXC_DF 8 /* Double Fault */ -#define X86_EXC_CSO 9 /* Coprocessor Segment Overrun */ -#define X86_EXC_TS 10 /* Invalid TSS */ -#define X86_EXC_NP 11 /* Segment Not Present */ -#define X86_EXC_SS 12 /* Stack-Segment Fault */ -#define X86_EXC_GP 13 /* General Protection Fault */ -#define X86_EXC_PF 14 /* Page Fault */ -#define X86_EXC_SPV 15 /* PIC Spurious Interrupt Vector */ -#define X86_EXC_MF 16 /* Maths fault (x87 FPU) */ -#define X86_EXC_AC 17 /* Alignment Check */ -#define X86_EXC_MC 18 /* Machine Check */ -#define X86_EXC_XM 19 /* SIMD Exception */ -#define X86_EXC_VE 20 /* Virtualisation Exception */ -#define X86_EXC_CP 21 /* Control-flow Protection */ -#define X86_EXC_HV 28 /* Hypervisor Injection */ -#define X86_EXC_VC 29 /* VMM Communication */ -#define X86_EXC_SX 30 /* Security Exception */ - -/* Bitmap of exceptions which have error codes. */ -#define X86_EXC_HAVE_EC \ - ((1u << X86_EXC_DF) | (1u << X86_EXC_TS) | (1u << X86_EXC_NP) | \ - (1u << X86_EXC_SS) | (1u << X86_EXC_GP) | (1u << X86_EXC_PF) | \ - (1u << X86_EXC_AC) | (1u << X86_EXC_CP) | \ - (1u << X86_EXC_VC) | (1u << X86_EXC_SX)) - -#endif /* __XEN_X86_DEFNS_H__ */ diff --git a/xen/include/asm-x86/x86-vendors.h b/xen/include/asm-x86/x86-vendors.h deleted file mode 100644 index 0a37024cbd..0000000000 --- a/xen/include/asm-x86/x86-vendors.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __XEN_X86_VENDORS_H__ -#define __XEN_X86_VENDORS_H__ - -/* - * CPU vendor IDs - * - * - X86_VENDOR_* are Xen-internal identifiers. The order is arbitrary, but - * values form a bitmap so vendor checks can be made against multiple - * vendors at once. 
- * - X86_VENDOR_*_E?X are architectural information from CPUID leaf 0 - */ -#define X86_VENDOR_UNKNOWN 0 - -#define X86_VENDOR_INTEL (1 << 0) -#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */ -#define X86_VENDOR_INTEL_ECX 0x6c65746eU -#define X86_VENDOR_INTEL_EDX 0x49656e69U - -#define X86_VENDOR_AMD (1 << 1) -#define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */ -#define X86_VENDOR_AMD_ECX 0x444d4163U -#define X86_VENDOR_AMD_EDX 0x69746e65U - -#define X86_VENDOR_CENTAUR (1 << 2) -#define X86_VENDOR_CENTAUR_EBX 0x746e6543U /* "CentaurHauls" */ -#define X86_VENDOR_CENTAUR_ECX 0x736c7561U -#define X86_VENDOR_CENTAUR_EDX 0x48727561U - -#define X86_VENDOR_SHANGHAI (1 << 3) -#define X86_VENDOR_SHANGHAI_EBX 0x68532020U /* " Shanghai " */ -#define X86_VENDOR_SHANGHAI_ECX 0x20206961U -#define X86_VENDOR_SHANGHAI_EDX 0x68676e61U - -#define X86_VENDOR_HYGON (1 << 4) -#define X86_VENDOR_HYGON_EBX 0x6f677948U /* "HygonGenuine" */ -#define X86_VENDOR_HYGON_ECX 0x656e6975U -#define X86_VENDOR_HYGON_EDX 0x6e65476eU - -#endif /* __XEN_X86_VENDORS_H__ */ diff --git a/xen/include/asm-x86/x86_64/efibind.h b/xen/include/asm-x86/x86_64/efibind.h deleted file mode 100644 index ddcfae07ec..0000000000 --- a/xen/include/asm-x86/x86_64/efibind.h +++ /dev/null @@ -1,280 +0,0 @@ -/*++ - -Copyright (c) 1998 Intel Corporation - -Module Name: - - efefind.h - -Abstract: - - EFI to compile bindings - - - - -Revision History - ---*/ - -#ifndef __GNUC__ -#pragma pack() -#endif - -// -// Basic int types of various widths -// - -#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L ) - - // No ANSI C 1999/2000 stdint.h integer width declarations - - #if defined(_MSC_EXTENSIONS) - - // Use Microsoft C compiler integer width declarations - - typedef unsigned __int64 uint64_t; - typedef __int64 int64_t; - typedef unsigned __int32 uint32_t; - typedef __int32 int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #elif defined(__GNUC__) - typedef unsigned long long uint64_t __attribute__((aligned (8))); - typedef long long int64_t __attribute__((aligned (8))); - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #elif defined(UNIX_LP64) - - /* Use LP64 programming model from C_FLAGS for integer width declarations */ - - typedef unsigned long uint64_t; - typedef long int64_t; - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #else - - /* Assume P64 programming model from C_FLAGS for integer width declarations */ - - typedef unsigned long long uint64_t __attribute__((aligned (8))); - typedef long long int64_t __attribute__((aligned (8))); - typedef unsigned int uint32_t; - typedef int int32_t; - typedef unsigned short uint16_t; - typedef short int16_t; - typedef unsigned char uint8_t; - typedef char int8_t; - #endif -#endif - -// -// Basic EFI types of various widths -// - -#ifndef __WCHAR_TYPE__ -# define __WCHAR_TYPE__ short -#endif - -typedef uint64_t UINT64; -typedef int64_t INT64; - -#ifndef _BASETSD_H_ - typedef uint32_t UINT32; - typedef int32_t INT32; -#endif - -typedef uint16_t UINT16; -typedef int16_t INT16; -typedef uint8_t UINT8; -typedef int8_t INT8; -typedef __WCHAR_TYPE__ WCHAR; - -#undef VOID -#define VOID void - - -typedef int64_t INTN; -typedef uint64_t UINTN; - -#ifdef 
EFI_NT_EMULATOR - #define POST_CODE(_Data) -#else - #ifdef EFI_DEBUG -#define POST_CODE(_Data) __asm mov eax,(_Data) __asm out 0x80,al - #else - #define POST_CODE(_Data) - #endif -#endif - -#define EFIERR(a) (0x8000000000000000 | a) -#define EFI_ERROR_MASK 0x8000000000000000 -#define EFIERR_OEM(a) (0xc000000000000000 | a) - - -#define BAD_POINTER 0xFBFBFBFBFBFBFBFB -#define MAX_ADDRESS 0xFFFFFFFFFFFFFFFF - -#ifdef EFI_NT_EMULATOR - #define BREAKPOINT() __asm { int 3 } -#else - #define BREAKPOINT() while (TRUE); // Make it hang on Bios[Dbg]32 -#endif - -// -// Pointers must be aligned to these addresses to function -// - -#define MIN_ALIGNMENT_SIZE 4 - -#define ALIGN_VARIABLE(Value ,Adjustment) \ - (UINTN)Adjustment = 0; \ - if((UINTN)Value % MIN_ALIGNMENT_SIZE) \ - (UINTN)Adjustment = MIN_ALIGNMENT_SIZE - ((UINTN)Value % MIN_ALIGNMENT_SIZE); \ - Value = (UINTN)Value + (UINTN)Adjustment - - -// -// Define macros to build data structure signatures from characters. -// - -#define EFI_SIGNATURE_16(A,B) ((A) | (B<<8)) -#define EFI_SIGNATURE_32(A,B,C,D) (EFI_SIGNATURE_16(A,B) | (EFI_SIGNATURE_16(C,D) << 16)) -#define EFI_SIGNATURE_64(A,B,C,D,E,F,G,H) (EFI_SIGNATURE_32(A,B,C,D) | ((UINT64)(EFI_SIGNATURE_32(E,F,G,H)) << 32)) -// -// To export & import functions in the EFI emulator environment -// - -#ifdef EFI_NT_EMULATOR - #define EXPORTAPI __declspec( dllexport ) -#else - #define EXPORTAPI -#endif - - -// -// EFIAPI - prototype calling convention for EFI function pointers -// BOOTSERVICE - prototype for implementation of a boot service interface -// RUNTIMESERVICE - prototype for implementation of a runtime service interface -// RUNTIMEFUNCTION - prototype for implementation of a runtime function that is not a service -// RUNTIME_CODE - pragma macro for declaring runtime code -// - -#ifndef EFIAPI // Forces EFI calling conventions regardless of compiler options - #ifdef _MSC_EXTENSIONS - #define EFIAPI __cdecl // Force C calling convention for Microsoft C compiler - #elif __clang__ || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) - #define EFIAPI __attribute__((__ms_abi__)) // Force Microsoft ABI - #else - #define EFIAPI // Substitute expression to force C calling convention - #endif -#endif - -#define BOOTSERVICE -//#define RUNTIMESERVICE(proto,a) alloc_text("rtcode",a); proto a -//#define RUNTIMEFUNCTION(proto,a) alloc_text("rtcode",a); proto a -#define RUNTIMESERVICE -#define RUNTIMEFUNCTION - - -#define RUNTIME_CODE(a) alloc_text("rtcode", a) -#define BEGIN_RUNTIME_DATA() data_seg("rtdata") -#define END_RUNTIME_DATA() data_seg("") - -#define VOLATILE volatile - -#define MEMORY_FENCE() - -#ifdef EFI_NT_EMULATOR - -// -// To help ensure proper coding of integrated drivers, they are -// compiled as DLLs. In NT they require a dll init entry pointer. -// The macro puts a stub entry point into the DLL so it will load. -// - -#define EFI_DRIVER_ENTRY_POINT(InitFunction) \ - UINTN \ - __stdcall \ - _DllMainCRTStartup ( \ - UINTN Inst, \ - UINTN reason_for_call, \ - VOID *rserved \ - ) \ - { \ - return 1; \ - } \ - \ - int \ - EXPORTAPI \ - __cdecl \ - InitializeDriver ( \ - void *ImageHandle, \ - void *SystemTable \ - ) \ - { \ - return InitFunction(ImageHandle, SystemTable); \ - } - - - #define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \ - (_if)->LoadInternal(type, name, NULL) - -#else // EFI_NT_EMULATOR - -// -// When building similar to FW, link everything together as -// one big module. 
-// - - #define EFI_DRIVER_ENTRY_POINT(InitFunction) \ - UINTN \ - InitializeDriver ( \ - VOID *ImageHandle, \ - VOID *SystemTable \ - ) \ - { \ - return InitFunction(ImageHandle, \ - SystemTable); \ - } \ - \ - EFI_STATUS efi_main( \ - EFI_HANDLE image, \ - EFI_SYSTEM_TABLE *systab \ - ) __attribute__((weak, \ - alias ("InitializeDriver"))); - - #define LOAD_INTERNAL_DRIVER(_if, type, name, entry) \ - (_if)->LoadInternal(type, name, entry) - -#endif // EFI_NT_EMULATOR - -// -// Some compilers don't support the forward reference construct: -// typedef struct XXXXX -// -// The following macro provides a workaround for such cases. -// -#ifdef NO_INTERFACE_DECL -#define INTERFACE_DECL(x) -#else -#ifdef __GNUC__ -#define INTERFACE_DECL(x) struct x -#else -#define INTERFACE_DECL(x) typedef struct x -#endif -#endif - -#ifdef _MSC_EXTENSIONS -#pragma warning ( disable : 4731 ) // Suppress warnings about modification of EBP -#endif - diff --git a/xen/include/asm-x86/x86_64/elf.h b/xen/include/asm-x86/x86_64/elf.h deleted file mode 100644 index 00227e0e12..0000000000 --- a/xen/include/asm-x86/x86_64/elf.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __X86_64_ELF_H__ -#define __X86_64_ELF_H__ - -#include -#include - -typedef struct { - unsigned long r15; - unsigned long r14; - unsigned long r13; - unsigned long r12; - unsigned long rbp; - unsigned long rbx; - unsigned long r11; - unsigned long r10; - unsigned long r9; - unsigned long r8; - unsigned long rax; - unsigned long rcx; - unsigned long rdx; - unsigned long rsi; - unsigned long rdi; - unsigned long orig_rax; - unsigned long rip; - unsigned long cs; - unsigned long rflags; - unsigned long rsp; - unsigned long ss; - unsigned long thread_fs; - unsigned long thread_gs; - unsigned long ds; - unsigned long es; - unsigned long fs; - unsigned long gs; -} ELF_Gregset; - -static inline void elf_core_save_regs(ELF_Gregset *core_regs, - crash_xen_core_t *xen_core_regs) -{ - asm ( "movq %%r15, %0" : "=m" (core_regs->r15) ); - asm ( "movq %%r14, %0" : "=m" (core_regs->r14) ); - asm ( "movq %%r13, %0" : "=m" (core_regs->r13) ); - asm ( "movq %%r12, %0" : "=m" (core_regs->r12) ); - asm ( "movq %%rbp, %0" : "=m" (core_regs->rbp) ); - asm ( "movq %%rbx, %0" : "=m" (core_regs->rbx) ); - asm ( "movq %%r11, %0" : "=m" (core_regs->r11) ); - asm ( "movq %%r10, %0" : "=m" (core_regs->r10) ); - asm ( "movq %%r9, %0" : "=m" (core_regs->r9) ); - asm ( "movq %%r8, %0" : "=m" (core_regs->r8) ); - asm ( "movq %%rax, %0" : "=m" (core_regs->rax) ); - asm ( "movq %%rcx, %0" : "=m" (core_regs->rcx) ); - asm ( "movq %%rdx, %0" : "=m" (core_regs->rdx) ); - asm ( "movq %%rsi, %0" : "=m" (core_regs->rsi) ); - asm ( "movq %%rdi, %0" : "=m" (core_regs->rdi) ); - - /* orig_rax not filled in for now */ - asm ( "call 0f; 0: popq %0" : "=m" (core_regs->rip) ); - core_regs->cs = read_sreg(cs); - asm ( "pushfq; popq %0" : "=m" (core_regs->rflags) ); - asm ( "movq %%rsp, %0" : "=m" (core_regs->rsp) ); - core_regs->ss = read_sreg(ss); - rdmsrl(MSR_FS_BASE, core_regs->thread_fs); - rdmsrl(MSR_GS_BASE, core_regs->thread_gs); - core_regs->ds = read_sreg(ds); - core_regs->es = read_sreg(es); - core_regs->fs = read_sreg(fs); - core_regs->gs = read_sreg(gs); - - asm ( "mov %%cr0, %0" : "=r" (xen_core_regs->cr0) ); - asm ( "mov %%cr2, %0" : "=r" (xen_core_regs->cr2) ); - asm ( "mov %%cr3, %0" : "=r" (xen_core_regs->cr3) ); - asm ( "mov %%cr4, %0" : "=r" (xen_core_regs->cr4) ); -} - -#endif /* __X86_64_ELF_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * 
tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h deleted file mode 100644 index cb1db107c4..0000000000 --- a/xen/include/asm-x86/x86_64/page.h +++ /dev/null @@ -1,166 +0,0 @@ - -#ifndef __X86_64_PAGE_H__ -#define __X86_64_PAGE_H__ - -#define __XEN_VIRT_START XEN_VIRT_START - -#define VADDR_TOP_BIT (1UL << (VADDR_BITS - 1)) -#define CANONICAL_MASK (~0UL & ~VADDR_MASK) - -#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63)) - -#ifndef __ASSEMBLY__ - -static inline unsigned long canonicalise_addr(unsigned long addr) -{ - if ( addr & VADDR_TOP_BIT ) - return addr | CANONICAL_MASK; - else - return addr & ~CANONICAL_MASK; -} - -#include - -#include - -/* - * Note: These are solely for the use by page_{get,set}_owner(), and - * therefore don't need to handle the XEN_VIRT_{START,END} range. - */ -#define virt_to_pdx(va) (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \ - PAGE_SHIFT) -#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \ - ((unsigned long)(pdx) << PAGE_SHIFT))) - -static inline unsigned long __virt_to_maddr(unsigned long va) -{ - ASSERT(va < DIRECTMAP_VIRT_END); - if ( va >= DIRECTMAP_VIRT_START ) - va -= DIRECTMAP_VIRT_START; - else - { - BUILD_BUG_ON(XEN_VIRT_END - XEN_VIRT_START != GB(1)); - /* Signed, so ((long)XEN_VIRT_START >> 30) fits in an imm32. */ - ASSERT(((long)va >> (PAGE_ORDER_1G + PAGE_SHIFT)) == - ((long)XEN_VIRT_START >> (PAGE_ORDER_1G + PAGE_SHIFT))); - - va += xen_phys_start - XEN_VIRT_START; - } - return (va & ma_va_bottom_mask) | - ((va << pfn_pdx_hole_shift) & ma_top_mask); -} - -static inline void *__maddr_to_virt(unsigned long ma) -{ - ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT)); - return (void *)(DIRECTMAP_VIRT_START + - ((ma & ma_va_bottom_mask) | - ((ma & ma_top_mask) >> pfn_pdx_hole_shift))); -} - -/* read access (should only be used for debug printk's) */ -typedef u64 intpte_t; -#define PRIpte "016lx" - -typedef struct { intpte_t l1; } l1_pgentry_t; -typedef struct { intpte_t l2; } l2_pgentry_t; -typedef struct { intpte_t l3; } l3_pgentry_t; -typedef struct { intpte_t l4; } l4_pgentry_t; -typedef l4_pgentry_t root_pgentry_t; - -#endif /* !__ASSEMBLY__ */ - -#define pte_read_atomic(ptep) read_atomic(ptep) -#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte) -#define pte_write(ptep, pte) write_atomic(ptep, pte) - -/* Given a virtual address, get an entry offset into a linear page table. */ -#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT) -#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT) -#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT) -#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT) - -#define is_guest_l2_slot(_d, _t, _s) \ - ( !((_t) & PGT_pae_xen_l2) || \ - ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) ) -#define is_guest_l4_slot(_d, _s) \ - ( is_pv_32bit_domain(_d) \ - ? 
((_s) == 0) \ - : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \ - ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))) - -#define root_table_offset l4_table_offset -#define root_get_pfn l4e_get_pfn -#define root_get_flags l4e_get_flags -#define root_get_intpte l4e_get_intpte -#define root_empty l4e_empty -#define root_from_paddr l4e_from_paddr -#define PGT_root_page_table PGT_l4_page_table - -/* - * PTE pfn and flags: - * 40-bit pfn = (pte[51:12]) - * 24-bit flags = (pte[63:52],pte[11:0]) - */ - -/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */ -#ifndef __ASSEMBLY__ -static inline unsigned int get_pte_flags(intpte_t x) -{ - return ((x >> 40) & ~0xfff) | (x & 0xfff); -} - -static inline intpte_t put_pte_flags(unsigned int x) -{ - return (((intpte_t)x & ~0xfff) << 40) | (x & 0xfff); -} -#endif - -/* - * Protection keys define a new 4-bit protection key field - * (PKEY) in bits 62:59 of leaf entries of the page tables. - * This corresponds to bits 22:19 of the 24-bit flags. - * - * Notice: Bit 22 is used by _PAGE_GNTTAB which is visible to PV guests, - * so Protection keys must be disabled on PV guests. - */ -#define _PAGE_PKEY_BITS (0x780000) /* Protection Keys, 22:19 */ - -#define get_pte_pkey(x) (MASK_EXTR(get_pte_flags(x), _PAGE_PKEY_BITS)) - -/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte. */ -#define _PAGE_NX_BIT (1U<<23) - -/* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte. */ -#define _PAGE_GNTTAB (1U<<22) - -/* - * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte. - * This is needed to distinguish between user and kernel PTEs since _PAGE_USER - * is asserted for both. - */ -#define _PAGE_GUEST_KERNEL (1U<<12) - -#define PAGE_HYPERVISOR_RO (__PAGE_HYPERVISOR_RO | _PAGE_GLOBAL) -#define PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RW | _PAGE_GLOBAL) -#define PAGE_HYPERVISOR_RX (__PAGE_HYPERVISOR_RX | _PAGE_GLOBAL) -#define PAGE_HYPERVISOR_RWX (__PAGE_HYPERVISOR | _PAGE_GLOBAL) -#define PAGE_HYPERVISOR_SHSTK (__PAGE_HYPERVISOR_SHSTK | _PAGE_GLOBAL) - -#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW -#define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | \ - _PAGE_GLOBAL | _PAGE_NX) -#define PAGE_HYPERVISOR_UC (__PAGE_HYPERVISOR_UC | \ - _PAGE_GLOBAL | _PAGE_NX) - -#endif /* __X86_64_PAGE_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/x86_64/regs.h b/xen/include/asm-x86/x86_64/regs.h deleted file mode 100644 index 171cf9a2e2..0000000000 --- a/xen/include/asm-x86/x86_64/regs.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _X86_64_REGS_H -#define _X86_64_REGS_H - -#include -#include - -#define ring_0(r) (((r)->cs & 3) == 0) -#define ring_1(r) (((r)->cs & 3) == 1) -#define ring_2(r) (((r)->cs & 3) == 2) -#define ring_3(r) (((r)->cs & 3) == 3) - -#define guest_kernel_mode(v, r) \ - (!is_pv_32bit_vcpu(v) ? \ - (ring_3(r) && ((v)->arch.flags & TF_kernel_mode)) : \ - (ring_1(r))) - -#define permit_softint(dpl, v, r) \ - ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3)) - -/* Check for null trap callback handler: Is the EIP null? */ -#define null_trap_bounce(v, tb) \ - (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0)) - -/* Number of bytes of on-stack execution state to be context-switched. */ -/* NB. Segment registers and bases are not saved/restored on x86/64 stack. 
*/ -#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es)) - -#endif diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h deleted file mode 100644 index e94371cf20..0000000000 --- a/xen/include/asm-x86/x86_64/system.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef __X86_64_SYSTEM_H__ -#define __X86_64_SYSTEM_H__ - -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ - (unsigned long)(n),sizeof(*(ptr)))) - -/* - * Atomic 16-byte compare and exchange. Compare OLD with MEM, if - * identical, store NEW in MEM. Return the initial value in MEM. - * Success is indicated by comparing RETURN with OLD. - * - * This function can only be called when cpu_has_cx16 is true. - */ - -static always_inline __uint128_t __cmpxchg16b( - volatile void *ptr, const __uint128_t *oldp, const __uint128_t *newp) -{ - union { - struct { uint64_t lo, hi; }; - __uint128_t raw; - } new = { .raw = *newp }, old = { .raw = *oldp }, prev; - - ASSERT(cpu_has_cx16); - - /* Don't use "=A" here - clang can't deal with that. */ - asm volatile ( "lock cmpxchg16b %[ptr]" - : "=d" (prev.hi), "=a" (prev.lo), - [ptr] "+m" (*(volatile __uint128_t *)ptr) - : "c" (new.hi), "b" (new.lo), "d" (old.hi), "a" (old.lo) ); - - return prev.raw; -} - -static always_inline __uint128_t cmpxchg16b_local_( - void *ptr, const __uint128_t *oldp, const __uint128_t *newp) -{ - union { - struct { uint64_t lo, hi; }; - __uint128_t raw; - } new = { .raw = *newp }, old = { .raw = *oldp }, prev; - - ASSERT(cpu_has_cx16); - - /* Don't use "=A" here - clang can't deal with that. */ - asm volatile ( "cmpxchg16b %[ptr]" - : "=d" (prev.hi), "=a" (prev.lo), - [ptr] "+m" (*(__uint128_t *)ptr) - : "c" (new.hi), "b" (new.lo), "d" (old.hi), "a" (old.lo) ); - - return prev.raw; -} - -#define cmpxchg16b(ptr, o, n) ({ \ - volatile void *_p = (ptr); \ - ASSERT(!((unsigned long)_p & 0xf)); \ - BUILD_BUG_ON(sizeof(*(o)) != sizeof(__uint128_t)); \ - BUILD_BUG_ON(sizeof(*(n)) != sizeof(__uint128_t)); \ - __cmpxchg16b(_p, (void *)(o), (void *)(n)); \ -}) - -#endif /* __X86_64_SYSTEM_H__ */ diff --git a/xen/include/asm-x86/x86_64/uaccess.h b/xen/include/asm-x86/x86_64/uaccess.h deleted file mode 100644 index ba79f950fb..0000000000 --- a/xen/include/asm-x86/x86_64/uaccess.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef __X86_64_UACCESS_H -#define __X86_64_UACCESS_H - -/* - * With CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS (apparent) PV guest accesses - * are prohibited from touching the Xen private VA range. The compat argument - * translation area, therefore, can't live within this range. Domains - * (potentially) in need of argument translation (32-bit PV, possibly HVM) get - * a secondary mapping installed, which needs to be used for such accesses in - * the PV case, and will also be used for HVM to avoid extra conditionals. 
- */ -#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current) + \ - (PERDOMAIN_ALT_VIRT_START - \ - PERDOMAIN_VIRT_START)) -#define COMPAT_ARG_XLAT_SIZE (2*PAGE_SIZE) -struct vcpu; -int setup_compat_arg_xlat(struct vcpu *v); -void free_compat_arg_xlat(struct vcpu *v); -#define is_compat_arg_xlat_range(addr, size) ({ \ - unsigned long __off; \ - __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \ - (__off < COMPAT_ARG_XLAT_SIZE) && \ - ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE); \ -}) - -#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE) -#define xlat_page_size COMPAT_ARG_XLAT_SIZE -#define xlat_page_left_size(xlat_page_current) \ - (xlat_page_start + xlat_page_size - xlat_page_current) - -#define xlat_malloc_init(xlat_page_current) do { \ - xlat_page_current = xlat_page_start; \ -} while (0) - -extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size); - -#define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&_p, sizeof(_t) * _c)) - -/* - * Valid if in +ve half of 48-bit address space, or above Xen-reserved area. - * This is also valid for range checks (addr, addr+size). As long as the - * start address is outside the Xen-reserved area, sequential accesses - * (starting at addr) will hit a non-canonical address (and thus fault) - * before ever reaching VIRT_START. - */ -#define __addr_ok(addr) \ - (((unsigned long)(addr) < (1UL<<47)) || \ - ((unsigned long)(addr) >= HYPERVISOR_VIRT_END)) - -#define access_ok(addr, size) \ - (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size)) - -#define array_access_ok(addr, count, size) \ - (likely(((count) ?: 0UL) < (~0UL / (size))) && \ - access_ok(addr, (count) * (size))) - -#define __compat_addr_ok(d, addr) \ - ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d)) - -#define __compat_access_ok(d, addr, size) \ - __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0)) - -#define compat_access_ok(addr, size) \ - __compat_access_ok(current->domain, addr, size) - -#define compat_array_access_ok(addr,count,size) \ - (likely((count) < (~0U / (size))) && \ - compat_access_ok(addr, 0 + (count) * (size))) - -#endif /* __X86_64_UACCESS_H */ diff --git a/xen/include/asm-x86/x86_emulate.h b/xen/include/asm-x86/x86_emulate.h deleted file mode 100644 index 9125807e24..0000000000 --- a/xen/include/asm-x86/x86_emulate.h +++ /dev/null @@ -1,21 +0,0 @@ -/****************************************************************************** - * x86_emulate.h - * - * Wrapper for generic x86 instruction decoder and emulator. - * - * Copyright (c) 2008, Citrix Systems, Inc. - * - * Authors: - * Keir Fraser - */ - -#ifndef __ASM_X86_X86_EMULATE_H__ -#define __ASM_X86_X86_EMULATE_H__ - -#include -#include -#include - -#include "../../arch/x86/x86_emulate/x86_emulate.h" - -#endif /* __ASM_X86_X86_EMULATE_H__ */ diff --git a/xen/include/asm-x86/xenoprof.h b/xen/include/asm-x86/xenoprof.h deleted file mode 100644 index cf6af8c5df..0000000000 --- a/xen/include/asm-x86/xenoprof.h +++ /dev/null @@ -1,107 +0,0 @@ -/****************************************************************************** - * asm-x86/xenoprof.h - * xenoprof x86 arch specific header file - * - * Copyright (c) 2006 Isaku Yamahata - * VA Linux Systems Japan K.K. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __ASM_X86_XENOPROF_H__ -#define __ASM_X86_XENOPROF_H__ - -struct vcpu; - -#ifdef CONFIG_XENOPROF - -#include - -int nmi_reserve_counters(void); -int nmi_setup_events(void); -int nmi_enable_virq(void); -int nmi_start(void); -void nmi_stop(void); -void nmi_disable_virq(void); -void nmi_release_counters(void); - -int xenoprof_arch_init(int *num_events, char *cpu_type); -#define xenoprof_arch_reserve_counters() nmi_reserve_counters() -#define xenoprof_arch_setup_events() nmi_setup_events() -#define xenoprof_arch_enable_virq() nmi_enable_virq() -#define xenoprof_arch_start() nmi_start() -#define xenoprof_arch_stop() nmi_stop() -#define xenoprof_arch_disable_virq() nmi_disable_virq() -#define xenoprof_arch_release_counters() nmi_release_counters() - -int xenoprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg); -int compat_oprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg); -int xenoprof_arch_ibs_counter(XEN_GUEST_HANDLE_PARAM(void) arg); - -struct cpu_user_regs; - -/* AMD IBS support */ -void ibs_init(void); -extern u32 ibs_caps; - -int xenoprofile_get_mode(struct vcpu *, const struct cpu_user_regs *); - -static inline int xenoprof_backtrace_supported(void) -{ - return 1; -} - -void xenoprof_backtrace(struct vcpu *, const struct cpu_user_regs *, - unsigned long depth, int mode); - -int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content); -int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content); -void passive_domain_destroy(struct vcpu *v); - -bool nmi_oprofile_send_virq(void); - -#else - -static inline int passive_domain_do_rdmsr(unsigned int msr, - uint64_t *msr_content) -{ - return 0; -} - -static inline int passive_domain_do_wrmsr(unsigned int msr, - uint64_t msr_content) -{ - return 0; -} - -static inline void passive_domain_destroy(struct vcpu *v) {} - -static inline bool nmi_oprofile_send_virq(void) -{ - return false; -} - -#endif /* CONFIG_XENOPROF */ - -#endif /* __ASM_X86_XENOPROF_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h deleted file mode 100644 index 7ab0bdde89..0000000000 --- a/xen/include/asm-x86/xstate.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * include/asm-i386/xstate.h - * - * x86 extended state (xsave/xrstor) related definitions - * - */ - -#ifndef __ASM_XSTATE_H -#define __ASM_XSTATE_H - -#include -#include -#include - -#define FCW_DEFAULT 0x037f -#define FCW_RESET 0x0040 -#define MXCSR_DEFAULT 0x1f80 - -extern uint32_t mxcsr_mask; - -#define XSTATE_CPUID 0x0000000d - -#define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */ - -#define XSAVE_HDR_SIZE 64 -#define XSAVE_SSE_OFFSET 160 -#define XSTATE_YMM_SIZE 256 -#define FXSAVE_SIZE 512 -#define XSAVE_HDR_OFFSET FXSAVE_SIZE -#define XSTATE_AREA_MIN_SIZE (FXSAVE_SIZE + XSAVE_HDR_SIZE) - 
-#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE) -#define XCNTXT_MASK (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \ - X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \ - XSTATE_NONLAZY) - -#define XSTATE_ALL (~(1ULL << 63)) -#define XSTATE_NONLAZY (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU) -#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY) -#define XSTATE_XSAVES_ONLY 0 -#define XSTATE_COMPACTION_ENABLED (1ULL << 63) - -#define XSTATE_ALIGN64 (1U << 1) - -extern u64 xfeature_mask; -extern u64 xstate_align; -extern unsigned int *xstate_offsets; -extern unsigned int *xstate_sizes; - -/* extended state save area */ -struct __attribute__((aligned (64))) xsave_struct -{ - union __attribute__((aligned(16))) { /* FPU/MMX, SSE */ - char x[512]; - struct { - uint16_t fcw; - uint16_t fsw; - uint8_t ftw; - uint8_t rsvd1; - uint16_t fop; - union { - uint64_t addr; - struct { - uint32_t offs; - uint16_t sel; - uint16_t rsvd; - }; - } fip, fdp; - uint32_t mxcsr; - uint32_t mxcsr_mask; - /* data registers follow here */ - }; - } fpu_sse; - - struct xsave_hdr { - u64 xstate_bv; - u64 xcomp_bv; - u64 reserved[6]; - } xsave_hdr; /* The 64-byte header */ - - char data[]; /* Variable layout states */ -}; - -struct xstate_bndcsr { - uint64_t bndcfgu; - uint64_t bndstatus; -}; - -/* extended state operations */ -bool __must_check set_xcr0(u64 xfeatures); -uint64_t get_xcr0(void); -void set_msr_xss(u64 xss); -uint64_t get_msr_xss(void); -uint64_t read_bndcfgu(void); -void xsave(struct vcpu *v, uint64_t mask); -void xrstor(struct vcpu *v, uint64_t mask); -void xstate_set_init(uint64_t mask); -bool xsave_enabled(const struct vcpu *v); -int __must_check validate_xstate(const struct domain *d, - uint64_t xcr0, uint64_t xcr0_accum, - const struct xsave_hdr *hdr); -int __must_check handle_xsetbv(u32 index, u64 new_bv); -void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size); -void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size); - -/* extended state init and cleanup functions */ -void xstate_free_save_area(struct vcpu *v); -int xstate_alloc_save_area(struct vcpu *v); -void xstate_init(struct cpuinfo_x86 *c); -unsigned int xstate_ctxt_size(u64 xcr0); - -static inline uint64_t xgetbv(unsigned int index) -{ - uint32_t lo, hi; - - ASSERT(index); /* get_xcr0() should be used instead. */ - asm volatile ( ".byte 0x0f,0x01,0xd0" /* xgetbv */ - : "=a" (lo), "=d" (hi) : "c" (index) ); - - return lo | ((uint64_t)hi << 32); -} - -static inline bool xstate_all(const struct vcpu *v) -{ - /* - * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE - * (in the legacy region of xsave area) are fixed, so saving - * XSTATE_FP_SSE will not cause overwriting problem with XSAVES/XSAVEC. - */ - return (v->arch.xsave_area->xsave_hdr.xcomp_bv & - XSTATE_COMPACTION_ENABLED) && - (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE); -} - -static inline bool __nonnull(1) -xsave_area_compressed(const struct xsave_struct *xsave_area) -{ - return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED; -} - -#endif /* __ASM_XSTATE_H */ diff --git a/xen/include/xen/acpi.h b/xen/include/xen/acpi.h index 088c238a50..08834f1402 100644 --- a/xen/include/xen/acpi.h +++ b/xen/include/xen/acpi.h @@ -40,8 +40,9 @@ #define ACPI_MADT_GET_TRIGGER(inti) ACPI_MADT_GET_(TRIGGER, inti) /* - * Fixmap pages to reserve for ACPI boot-time tables (see asm-x86/fixmap.h or - * asm-arm/config.h, 64 pages(256KB) is large enough for most cases.) 
+ * Fixmap pages to reserve for ACPI boot-time tables (see + * arch/x86/include/asm/fixmap.h or arch/arm/include/asm/config.h, + * 64 pages(256KB) is large enough for most cases.) */ #define NUM_FIXMAP_ACPI_PAGES 64 diff --git a/xen/include/xen/bitmap.h b/xen/include/xen/bitmap.h index e9175ab54a..3caf92c76d 100644 --- a/xen/include/xen/bitmap.h +++ b/xen/include/xen/bitmap.h @@ -14,7 +14,7 @@ * * Function implementations generic to all architectures are in * lib/bitmap.c. Function implementations that are architecture - * specific are in various include/asm-<arch>/bitops.h headers + * specific are in various asm/bitops.h headers * and other arch/<arch> specific files. * * See lib/bitmap.c for more details.
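
A few of the definitions moved above are worth illustrating. The x86-vendors.h comment notes that X86_VENDOR_* values form a bitmap precisely so one test can cover several vendors at once; a minimal standalone sketch of that idiom (plain C; the detected-vendor value and the AMD/Hygon pairing are illustrative assumptions, not code from this patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Vendor bits as defined in the moved x86-vendors.h. */
    #define X86_VENDOR_INTEL (1 << 0)
    #define X86_VENDOR_AMD   (1 << 1)
    #define X86_VENDOR_HYGON (1 << 4)

    int main(void)
    {
        unsigned int vendor = X86_VENDOR_HYGON; /* hypothetical detected vendor */

        /* One mask test covers both AMD and the AMD-derived Hygon parts. */
        bool amd_like = vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON);

        printf("AMD-like: %d\n", amd_like);
        return 0;
    }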
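
x86-defns.h likewise keeps X86_EXC_HAVE_EC as a bitmap of the exception vectors that push an error code. A cut-down sketch of how such a bitmap is queried (only three vectors reproduced here, so the mask below is a stand-in for, not a copy of, the full X86_EXC_HAVE_EC):

    #include <stdbool.h>
    #include <stdio.h>

    /* Vector numbers from the moved x86-defns.h. */
    #define X86_EXC_DB 1   /* Debug Exception: no error code */
    #define X86_EXC_GP 13  /* General Protection Fault: error code */
    #define X86_EXC_PF 14  /* Page Fault: error code */

    /* Cut-down stand-in for X86_EXC_HAVE_EC, covering only these vectors. */
    #define EXC_HAVE_EC ((1u << X86_EXC_GP) | (1u << X86_EXC_PF))

    static bool exc_has_error_code(unsigned int vec)
    {
        return vec < 32 && (EXC_HAVE_EC & (1u << vec));
    }

    int main(void)
    {
        printf("#DB pushes error code: %d\n", exc_has_error_code(X86_EXC_DB)); /* 0 */
        printf("#GP pushes error code: %d\n", exc_has_error_code(X86_EXC_GP)); /* 1 */
        return 0;
    }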
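
x86_64/page.h implements is_canonical_address() as a two-shift comparison: in a 48-bit virtual address space, bits 63:47 must all equal bit 47, so arithmetic right shifts by 47 and by 63 agree exactly for canonical addresses. A self-contained illustration (the test values are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same check as the moved is_canonical_address(), on explicit types. */
    static bool is_canonical_address(uint64_t x)
    {
        return ((int64_t)x >> 47) == ((int64_t)x >> 63);
    }

    int main(void)
    {
        printf("%d\n", is_canonical_address(0x00007fffffffffffULL)); /* 1: top of low half */
        printf("%d\n", is_canonical_address(0xffff800000000000ULL)); /* 1: bottom of high half */
        printf("%d\n", is_canonical_address(0x0000800000000000ULL)); /* 0: inside the hole */
        return 0;
    }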
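
The same header documents the PTE split (a 40-bit pfn in pte[51:12], with the 24 flag bits gathered from pte[63:52] and pte[11:0]), which get_pte_flags() and put_pte_flags() fold and unfold. A worked round trip (the example pte value is made up):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t intpte_t;

    /* Fold pte[63:52] and pte[11:0] into a 24-bit flags word, as in the
     * moved get_pte_flags(); put_pte_flags() is the exact inverse. */
    static unsigned int get_pte_flags(intpte_t x)
    {
        return ((x >> 40) & ~0xfff) | (x & 0xfff);
    }

    static intpte_t put_pte_flags(unsigned int x)
    {
        return (((intpte_t)x & ~0xfff) << 40) | (x & 0xfff);
    }

    int main(void)
    {
        /* Hypothetical PTE: NX (bit 63) plus low attribute bits 0x063. */
        intpte_t pte = (1ULL << 63) | 0x063;
        unsigned int flags = get_pte_flags(pte);

        printf("flags = %#x\n", flags);  /* 0x800063: flag bit 23 is the NX bit */
        printf("round trip ok: %d\n", put_pte_flags(flags) == pte); /* 1 */
        return 0;
    }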
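
Finally, x86_64/system.h documents the cmpxchg16b() contract: OLD is compared with MEM and NEW is stored only if they are identical, with success detected by comparing the returned previous value against OLD. A sketch of that calling pattern using the GCC/clang __int128 atomic builtin rather than Xen's LOCK CMPXCHG16B inline assembly (an assumed portable stand-in; build with -mcx16 so the instruction itself is emitted instead of a libatomic call):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns the previous memory contents, like the moved __cmpxchg16b(). */
    static unsigned __int128 cmpxchg16b_demo(volatile unsigned __int128 *ptr,
                                             unsigned __int128 old,
                                             unsigned __int128 new)
    {
        __atomic_compare_exchange_n((unsigned __int128 *)ptr, &old, new,
                                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old; /* updated in place to the value actually found */
    }

    int main(void)
    {
        _Alignas(16) unsigned __int128 mem = 42;

        unsigned __int128 prev = cmpxchg16b_demo(&mem, 42, 1234);
        printf("first swap %s\n", prev == 42 ? "succeeded" : "failed");

        prev = cmpxchg16b_demo(&mem, 42, 5678); /* OLD is stale: no store */
        printf("second swap %s\n", prev == 42 ? "succeeded" : "failed");
        return 0;
    }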