3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen/include/asm-x86/page.h
3e450943kzme29HPCtq5HNOVQkddfw xen/include/asm-x86/param.h
3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/include/asm-x86/pci.h
+404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86/pda.h
4022a73diKn2Ax4-R4gzk59lm1YdDg xen/include/asm-x86/pdb.h
3ddb79c2QF5-pZGzuX4QukPCDAl59A xen/include/asm-x86/processor.h
3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/ptrace.h
3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-x86/types.h
3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/uaccess.h
3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-x86/unaligned.h
-404f1b9b_phpQlRnyiWqP6RodfZDpg xen/include/asm-x86/x86_64/config.h
404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h
-404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86/x86_64/pda.h
404f1bb756fZfxk5HDx7J7BW3R-1jQ xen/include/asm-x86/x86_64/processor.h
404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h
404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
400304fcmRQmDdFYEzDh0wcBba9alg xen/include/hypervisor-ifs/COPYING
-404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h
-404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/hypervisor-ifs/arch-x86_64/hypervisor-if.h
+404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/hypervisor-ifs/arch-x86_32.h
+404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/hypervisor-ifs/arch-x86_64.h
3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
@$(MAKEBOOT) clean
archmrproper:
- rm -f include/asm-xen/hypervisor-ifs/arch
archdep:
- rm -f include/asm-xen/hypervisor-ifs/arch
- ( cd include/asm-xen/hypervisor-ifs ; rm -rf arch ; ln -sf arch-x86 arch)
@$(MAKEBOOT) dep
make-links: delete-links
ln -sf asm-$(TARGET_ARCH) include/asm
- ln -sf arch-$(TARGET_ARCH) include/hypervisor-ifs/arch
delete-links:
- rm -f include/asm include/hypervisor-ifs/arch
+ rm -f include/asm
# Blow away kernel.o because build info is stored statically within it.
delete-unfresh-files:
CFLAGS := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing -O3
CFLAGS += -iwithprefix include -Wall -Werror -DMONITOR_BASE=$(MONITOR_BASE)
CFLAGS += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
-CFLAGS += -Wno-pointer-arith -Wredundant-decls -D$(TARGET_SUBARCH)
+CFLAGS += -Wno-pointer-arith -Wredundant-decls
LDFLAGS := -T xen.lds -N
ifeq ($(TARGET_SUBARCH),x86_64)
CFLAGS += -m64
-LDARCHFLAGS :=
+LDARCHFLAGS := --oformat elf64-x86-64
endif
mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
/* Create read-only mapping of MPT for guest-OS use. */
- idle_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT];
mk_l2_readonly(idle_pg_table +
- (READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
+ (RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
/* Set up mapping cache for domain pages. */
mapcache = (unsigned long *)get_free_page(GFP_KERNEL);
#include <asm/i387.h>
#include <xen/shadow.h>
-#ifdef CONFIG_X86_64BITMODE
+#if defined(__x86_64__)
#define ELFSIZE 64
#else
#define ELFSIZE 32
#define CONFIG_XEN_ATTENTION_KEY 1
-
#define HZ 100
/*
* Just to keep compiler happy.
* NB. DO NOT CHANGE SMP_CACHE_BYTES WITHOUT FIXING arch/i386/entry.S!!!
* It depends on size of irq_cpustat_t, for example, being 64 bytes. :-)
- * Mmmm... so niiiiiice....
*/
#define SMP_CACHE_BYTES 64
#define NR_CPUS 16
#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#define ____cacheline_aligned __cacheline_aligned
-/*** Hypervisor owns top 64MB of virtual address space. ***/
+/*
+ * Amount of slack domain memory to leave in the system, in kilobytes.
+ * Prevents a hard out-of-memory crunch for things like network receive.
+ */
+#define SLACK_DOMAIN_MEM_KILOBYTES 2048
+
+/* Linkage for x86 */
+#define FASTCALL(x) x __attribute__((regparm(3)))
+#define asmlinkage __attribute__((regparm(0)))
+#define __ALIGN .align 16,0x90
+#define __ALIGN_STR ".align 16,0x90"
+#define SYMBOL_NAME_STR(X) #X
+#define SYMBOL_NAME(X) X
+#define SYMBOL_NAME_LABEL(X) X##:
+#ifdef __ASSEMBLY__
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+#define ENTRY(name) \
+ .globl SYMBOL_NAME(name); \
+ ALIGN; \
+ SYMBOL_NAME_LABEL(name)
+#endif
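+
+/*
+ * Illustrative expansion ('do_hypercall' is a hypothetical symbol): in an
+ * assembly source,
+ *     ENTRY(do_hypercall)
+ * expands to
+ *     .globl do_hypercall; .align 16,0x90; do_hypercall:
+ */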
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#define NR_syscalls 256
+
+#ifndef NDEBUG
+#define MEMORY_GUARD
+#endif
+
+#ifndef __ASSEMBLY__
+extern unsigned long _end; /* standard ELF symbol */
+extern void __out_of_line_bug(int line) __attribute__((noreturn));
+#define out_of_line_bug() __out_of_line_bug(__LINE__)
+#endif /* __ASSEMBLY__ */
+
+#if defined(__x86_64__)
+
+#define PML4_ENTRY_BITS 39
+#define PML4_ENTRY_BYTES (1UL<<PML4_ENTRY_BITS)
+
+/*
+ * Memory layout:
+ * 0x0000000000000000 - 0x00007fffffffffff [128TB, 2^47 bytes, PML4:0-255]
+ * Guest-defined use.
+ * 0x0000800000000000 - 0xffff7fffffffffff [16EB]
+ * Inaccessible: current arch only supports 48-bit sign-extended VAs.
+ * 0xffff800000000000 - 0xffff803fffffffff [256GB, 2^38 bytes, PML4:256]
+ * Read-only machine-to-phys translation table (GUEST ACCESSIBLE).
+ * 0xffff804000000000 - 0xffff807fffffffff [256GB, 2^38 bytes, PML4:256]
+ * Reserved for future shared info with the guest OS (GUEST ACCESSIBLE).
+ * 0xffff808000000000 - 0xffff80ffffffffff [512GB, 2^39 bytes, PML4:257]
+ * Read-only guest linear page table (GUEST ACCESSIBLE).
+ * 0xffff810000000000 - 0xffff817fffffffff [512GB, 2^39 bytes, PML4:258]
+ * Guest linear page table.
+ * 0xffff818000000000 - 0xffff81ffffffffff [512GB, 2^39 bytes, PML4:259]
+ * Shadow linear page table.
+ * 0xffff820000000000 - 0xffff827fffffffff [512GB, 2^39 bytes, PML4:260]
+ * Per-domain mappings (e.g., GDT, LDT).
+ * 0xffff828000000000 - 0xffff82ffffffffff [512GB, 2^39 bytes, PML4:261]
+ * Reserved for future use.
+ * 0xffff830000000000 - 0xffff83ffffffffff [1TB, 2^40 bytes, PML4:262-263]
+ * 1:1 direct mapping of all physical memory. Xen and its heap live here.
+ * 0xffff840000000000 - 0xffff87ffffffffff [4TB, 2^42 bytes, PML4:264-271]
+ * Reserved for future use.
+ * 0xffff880000000000 - 0xffffffffffffffff [120TB, PML4:272-511]
+ * Guest-defined use.
+ */
+
+/* Hypervisor reserves PML4 slots 256 to 271 inclusive. */
+#define HYPERVISOR_VIRT_START (0xFFFF800000000000UL)
+#define HYPERVISOR_VIRT_END (0xFFFF880000000000UL)
+/* Slot 256: read-only guest-accessible machine-to-phys translation table. */
+#define RO_MPT_VIRT_START (HYPERVISOR_VIRT_START)
+#define RO_MPT_VIRT_END (RO_MPT_VIRT_START + PML4_ENTRY_BYTES/2)
+/* Slot 257: read-only guest-accessible linear page table. */
+#define RO_LINEAR_PT_VIRT_START (RO_MPT_VIRT_END + PML4_ENTRY_BYTES/2)
+#define RO_LINEAR_PT_VIRT_END (RO_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
+/* Slot 258: linear page table (guest table). */
+#define LINEAR_PT_VIRT_START (RO_LINEAR_PT_VIRT_END)
+#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
+/* Slot 259: linear page table (shadow table). */
+#define SH_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_END)
+#define SH_LINEAR_PT_VIRT_END (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
+/* Slot 260: per-domain mappings. */
+#define PERDOMAIN_VIRT_START (SH_LINEAR_PT_VIRT_END)
+#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + PML4_ENTRY_BYTES)
+/* Slots 262-263: a direct 1:1 mapping of all physical memory. */
+#define DIRECTMAP_VIRT_START (PERDOMAIN_VIRT_END + PML4_ENTRY_BYTES)
+#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + PML4_ENTRY_BYTES*2)
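+
+/*
+ * Worked example (illustrative): a virtual address's PML4 slot is bits
+ * 47:39, i.e. (va >> PML4_ENTRY_BITS) & 511. Thus:
+ *     (RO_MPT_VIRT_START    >> 39) & 511 == 256
+ *     (PERDOMAIN_VIRT_START >> 39) & 511 == 260
+ *     (DIRECTMAP_VIRT_START >> 39) & 511 == 262
+ * matching the memory-layout comment above.
+ */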
+
+#define PGT_base_page_table PGT_l4_page_table
+
+#define __HYPERVISOR_CS64 0x0810
+#define __HYPERVISOR_CS32 0x0808
+#define __HYPERVISOR_DS 0x0818
+
+/* For generic assembly code: use macros to define operation/operand sizes. */
+#define __OS "q" /* Operation Suffix */
+#define __OP "r" /* Operand Prefix */
+
+#elif defined(__i386__)
+
+/* Hypervisor owns top 64MB of virtual address space. */
#define HYPERVISOR_VIRT_START (0xFC000000UL)
/*
* First 4MB are mapped read-only for all. It's for the machine->physical
* mapping table (MPT table). The following are virtual addresses.
*/
-#define READONLY_MPT_VIRT_START (HYPERVISOR_VIRT_START)
-#define READONLY_MPT_VIRT_END (READONLY_MPT_VIRT_START + (4*1024*1024))
+#define RO_MPT_VIRT_START (HYPERVISOR_VIRT_START)
+#define RO_MPT_VIRT_END (RO_MPT_VIRT_START + (4*1024*1024))
/*
* Next 12MB is fixed monitor space, which is part of a 40MB direct-mapped
* memory region. The following are machine addresses.
#define MAX_MONITOR_ADDRESS (12*1024*1024)
#define MAX_DIRECTMAP_ADDRESS (40*1024*1024)
/* And the virtual addresses for the direct-map region... */
-#define DIRECTMAP_VIRT_START (READONLY_MPT_VIRT_END)
+#define DIRECTMAP_VIRT_START (RO_MPT_VIRT_END)
#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + MAX_DIRECTMAP_ADDRESS)
#define MONITOR_VIRT_START (DIRECTMAP_VIRT_START)
#define MONITOR_VIRT_END (MONITOR_VIRT_START + MAX_MONITOR_ADDRESS)
#define IOREMAP_VIRT_START (MAPCACHE_VIRT_END)
#define IOREMAP_VIRT_END (IOREMAP_VIRT_START + (4*1024*1024))
-/*
- * Amount of slack domain memory to leave in system, in megabytes.
- * Prevents a hard out-of-memory crunch for thinsg like network receive.
- */
-#define SLACK_DOMAIN_MEM_KILOBYTES 2048
-
-/* Linkage for x86 */
-#define FASTCALL(x) x __attribute__((regparm(3)))
-#define asmlinkage __attribute__((regparm(0)))
-#define __ALIGN .align 16,0x90
-#define __ALIGN_STR ".align 16,0x90"
-#define SYMBOL_NAME_STR(X) #X
-#define SYMBOL_NAME(X) X
-#define SYMBOL_NAME_LABEL(X) X##:
-#ifdef __ASSEMBLY__
-#define ALIGN __ALIGN
-#define ALIGN_STR __ALIGN_STR
-#define ENTRY(name) \
- .globl SYMBOL_NAME(name); \
- ALIGN; \
- SYMBOL_NAME_LABEL(name)
-#endif
-
#define PGT_base_page_table PGT_l2_page_table
-#define barrier() __asm__ __volatile__("": : :"memory")
-
#define __HYPERVISOR_CS 0x0808
#define __HYPERVISOR_DS 0x0810
-#define NR_syscalls 256
-
-#ifndef NDEBUG
-#define MEMORY_GUARD
-#endif
-
-#ifndef __ASSEMBLY__
-extern unsigned long _end; /* standard ELF symbol */
-extern void __out_of_line_bug(int line) __attribute__((noreturn));
-#define out_of_line_bug() __out_of_line_bug(__LINE__)
-#endif /* __ASSEMBLY__ */
-
/* For generic assembly code: use macros to define operation/operand sizes. */
#define __OS "l" /* Operation Suffix */
#define __OP "e" /* Operand Prefix */
+#endif /* __i386__ */
+
#endif /* __XEN_I386_CONFIG_H__ */
#define rdtscl(low) \
__asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-#ifdef x86_32
+#if defined(__i386__)
#define rdtscll(val) \
__asm__ __volatile__("rdtsc" : "=A" (val))
-#else
+#elif defined(__x86_64__)
#define rdtscll(val) do { \
unsigned int a,d; \
asm volatile("rdtsc" : "=a" (a), "=d" (d)); \
--- /dev/null
+#ifndef X86_64_PDA_H
+#define X86_64_PDA_H
+
+#include <xen/cache.h>
+
+/* Per-processor data structure. %gs points to it while the kernel runs. */
+/* To use a new field with the *_pda macros, first add it to tools/offset.c. */
+struct x8664_pda {
+ unsigned long kernelstack; /* TOS for current process */
+ unsigned long oldrsp; /* user rsp for system call */
+ unsigned long irqrsp; /* Old rsp for interrupts. */
+ struct task_struct *pcurrent; /* Current process */
+ int irqcount; /* Irq nesting counter. Starts with -1 */
+ int cpunumber; /* Logical CPU number */
+ /* XXX: could be a single list */
+ unsigned long *pgd_quick;
+ unsigned long *pmd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+ char *irqstackptr; /* top of irqstack */
+ unsigned long volatile *level4_pgt;
+} ____cacheline_aligned;
+
+#define PDA_STACKOFFSET (5*8)
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
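+/* With 4KB pages, IRQSTACKSIZE is an order-2 (16KB) per-CPU IRQ stack. */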
+
+extern struct x8664_pda cpu_pda[];
+
+/*
+ * There is no fast way to get the base address of the PDA; every access
+ * has to go through %gs. Hence the switch-on-size macros below.
+ */
+#define sizeof_field(type,field) (sizeof(((type *)0)->field))
+#define typeof_field(type,field) typeof(((type *)0)->field)
+
+extern void __bad_pda_field(void);
+/* Don't use offsetof because it requires too much infrastructure */
+#define pda_offset(field) ((unsigned long)&((struct x8664_pda *)0)->field)
+
+#define pda_to_op(op,field,val) do { \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+ case 2: asm volatile(op "w %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
+ case 4: asm volatile(op "l %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
+ case 8: asm volatile(op "q %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+
+#define pda_from_op(op,field) ({ \
+ typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+ case 2: asm volatile(op "w %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
+ case 4: asm volatile(op "l %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
+ case 8: asm volatile(op "q %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
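+
+/*
+ * Usage sketch (illustrative; 'stack_top' is a hypothetical variable).
+ * Each accessor compiles to a single %gs-relative instruction whose
+ * width is chosen from the field's size:
+ *     int cpu = read_pda(cpunumber);        e.g. movl %gs:<off>,%eax
+ *     write_pda(kernelstack, stack_top);    e.g. movq %rdi,%gs:<off>
+ */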
+
+#endif
* so this is correct in the x86 case.
*/
-#ifdef x86_32
+#if defined(__i386__)
#define smp_processor_id() (current->processor)
-#else
+#elif defined(__x86_64__)
#include <asm/pda.h>
#define smp_processor_id() read_pda(cpunumber)
#endif
return dest;
}
+#define __HAVE_ARCH_MEMCMP
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
-#ifdef x86_32
+#if defined(__i386__)
case 4:
__asm__ __volatile__("xchgl %0,%1"
:"=r" (x)
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
-#else
+#elif defined(__x86_64__)
case 4:
__asm__ __volatile__("xchgl %k0,%1"
:"=r" (x)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
-#ifdef x86_32
+#if defined(__i386__)
case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
-#else
+#elif defined(__x86_64__)
case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
: "=a"(prev)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
-#ifdef x86_64
-#define __save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-#else
+#if defined(__i386__)
#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
+#elif defined(__x86_64__)
+#define __save_flags(x) do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
+#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#endif
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
/* For spinlocks etc */
-#ifdef x86_64
-#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
-#else
+#if defined(__i386__)
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
+#elif defined(__x86_64__)
+#define local_irq_save(x) do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#endif
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
typedef unsigned int __u32;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-#ifdef x86_32
+#if defined(__i386__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
-#else
+#elif defined(__x86_64__)
typedef __signed__ long __s64;
typedef unsigned long __u64;
#endif
typedef signed int s32;
typedef unsigned int u32;
-#ifdef x86_32
+#if defined(__i386__)
typedef signed long long s64;
typedef unsigned long long u64;
#define BITS_PER_LONG 32
-#else
+#elif defined(__x86_64__)
typedef signed long s64;
typedef unsigned long u64;
#define BITS_PER_LONG 64
+++ /dev/null
-/******************************************************************************
- * config.h
- *
- * A Linux-style configuration list.
- *
- */
-
-#ifndef __XEN_X86_64_CONFIG_H__
-#define __XEN_X86_64_CONFIG_H__
-
-#define CONFIG_X86 1
-#define CONFIG_X86_64BITMODE 1
-
-#define CONFIG_SMP 1
-#define CONFIG_X86_LOCAL_APIC 1
-#define CONFIG_X86_IO_APIC 1
-#define CONFIG_X86_L1_CACHE_SHIFT 5
-
-#define CONFIG_PCI 1
-#define CONFIG_PCI_BIOS 1
-#define CONFIG_PCI_DIRECT 1
-
-#define CONFIG_IDE 1
-#define CONFIG_BLK_DEV_IDE 1
-#define CONFIG_BLK_DEV_IDEDMA 1
-#define CONFIG_BLK_DEV_IDEPCI 1
-#define CONFIG_IDEDISK_MULTI_MODE 1
-#define CONFIG_IDEDISK_STROKE 1
-#define CONFIG_IDEPCI_SHARE_IRQ 1
-#define CONFIG_BLK_DEV_IDEDMA_PCI 1
-#define CONFIG_IDEDMA_PCI_AUTO 1
-#define CONFIG_IDEDMA_AUTO 1
-#define CONFIG_IDEDMA_ONLYDISK 1
-#define CONFIG_BLK_DEV_IDE_MODES 1
-#define CONFIG_BLK_DEV_PIIX 1
-
-#define CONFIG_SCSI 1
-#define CONFIG_SCSI_LOGGING 1
-#define CONFIG_BLK_DEV_SD 1
-#define CONFIG_SD_EXTRA_DEVS 40
-#define CONFIG_SCSI_MULTI_LUN 1
-
-#define CONFIG_XEN_ATTENTION_KEY 1
-
-#define HZ 100
-
-/*
- * Just to keep compiler happy.
- * NB. DO NOT CHANGE SMP_CACHE_BYTES WITHOUT FIXING arch/i386/entry.S!!!
- * It depends on size of irq_cpustat_t, for example, being 64 bytes. :-)
- * Mmmm... so niiiiiice....
- */
-#define SMP_CACHE_BYTES 64
-#define NR_CPUS 16
-#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#define ____cacheline_aligned __cacheline_aligned
-
-#define PHYSICAL_ADDRESS_BITS 52
-#define MAX_PHYSICAL_ADDRESS (1 << PHYSICAL_ADDRESS_BITS)
-#define VIRTUAL_ADDRESS_BITS 48
-#define XEN_PAGE_SIZE 4096
-
-#define PTE_SIZE 8
-#define TOTAL_PTES (512ULL * 512 * 512 * 512)
-
-/* next PML4 from an _END address */
-#define PML4_BITS 39
-#define PML4_SPACE (1ULL << PML4_BITS)
-
-/*
- * Memory layout
- *
- * 0x0000000000000000 - 0x00007fffffffffff Guest & user apps (128TB)
- * (Only for 32-bit guests)
- * 0x00000000fc000000 - 0x00000000fc3fffff Machine/Physical 32-bit shadow (4MB)
- * 0x00000000fc400000 - 0x00000000feffffff IO remap for 32-bit guests (44MB)
- * 0x00000000ff000000 - 0x00000000ff3fffff 32-bit PTE shadow (4MB)
- *
- * 0xffff800000000000 - 0xffff807fffffffff Linear page table (512GB)
- * 0xffff808000000000 - 0xffff80ffffffffff Reserved for shadow page table (512GB)
- *
- * 0xffff810000000000 - 0xffff82ffffffffff Xen PML4 slots
- * 0xffff810000000000 - 0xffff81003fffffff Xen hypervisor virtual space (1GB)
- * 0xffff810040000000 - 0xffff81807fffffff Per-domain mappings (1GB)
- * 0xffff810080000000 - 0xffff81387fffffff R/O physical map (224GB)
- * 0xffff813880000000 - 0xffff81707fffffff R/W physical map (224GB)
- * 0xffff817080000000 - 0xffff82c07fffffff Frame table (1344GB)
- * 0xffff82c080000000 - 0xffff82c0bfffffff I/O remap space (1GB)
- * 0xffff82c0c0000000 - 0xffff82ffffffffff (253GB)
- *
- * 0xffff830000000000 - 0xffff87ffffffffff RESERVED (5TB)
- *
- * 0xffff880000000000 - ... Physical 1:1 direct mapping (112TB max)
- * 0xffff880000000000 - 0xffff880001000000 Low memory DMA region (16M)
- *
- * 0xfffff80000000000 - 0xffffffffffffffff Reserved for guest (8TB)
- *
- * The requirement that we have a 1:1 map of physical memory limits
- * the maximum memory size we can support. With only 48 virtual address
- * bits, and the assumption that guests will run users in positive address
- * space, a contiguous 1:1 map can only live in the negative address space.
- * Since we don't want to bump guests out of the very top of memory and
- * force relocation, we can't use this entire space, and Xen has several
- * heavy mapping that require PML4 slices. Just to be safe, we reserve
- * 16 PML4s each for Xen and the guest. 224 PML4s give us 112 terabytes
- * of addressable memory. Any high device physical addresses beyond this
- * region can be mapped into the IO remap space or some of the reserved
- * 6TB region.
- *
- * 112 TB is just 16 TB shy of the maximum physical memory supported
- * on Linux 2.6.0, and should be enough for anybody.
- *
- * There are some additional constraints in the memory layout that require
- * several changes from the i386 architecture.
- *
- * ACPI data and ACPI non-volatile storage must be placed in some region
- * of memory below the 4GB mark. Depending on the BIOS and system, we
- * may have this located as low as 1GB. This means allocating large
- * chunks of physically contiguous memory from the direct mapping may not
- * be possible.
- *
- * The full frame table for 112TB of physical memory currently occupies
- * 1344GB space. This clearly can not be allocated in physically contiguous
- * space, so it must be moved to a virtual address.
- *
- * Both copies of the machine->physical table must also be relocated.
- * (112 TB / 4k) * 8 bytes means that each copy of the physical map requires
- * 224GB of space, thus it also must move to VM space.
- *
- * The physical pages used to allocate the page tables for the direct 1:1
- * map may occupy (112TB / 2M) * 8 bytes = 448MB. This is almost guaranteed
- * to fit in contiguous physical memory, but these pages used to be allocated
- * in the Xen monitor address space. This means the Xen address space must
- * accomodate up to ~500 MB, which means it also must move out of the
- * direct mapped region.
- *
- * Since both copies of the MPT, the frame table, and Xen now exist in
- * purely virtual space, we have the added advantage of being able to
- * map them to local pages on NUMA machines, or use NUMA aware memory
- * allocation within Xen itself.
- *
- * Additionally, the 1:1 page table now exists contiguously in virtual
- * space, but may be mapped to physically separated pages, allowing
- * each node to contain the page tables for its own local memory. Setting
- * up this mapping presents a bit of a chicken-egg problem, but is possible
- * as a future enhancement.
- *
- * Zachary Amsden (zamsden@cisco.com)
- *
- */
-
-/* Guest and user space */
-#define NSPACE_VIRT_START 0
-#define NSPACE_VIRT_END (1ULL << (VIRTUAL_ADDRESS_BITS - 1))
-
-/* Priviledged space */
-#define ESPACE_VIRT_END 0
-#define ESPACE_VIRT_START (ESPACE_VIRT_END-(1ULL << (VIRTUAL_ADDRESS_BITS-1)))
-
-/* reservations in e-space */
-#define GUEST_RESERVED_PML4S 16
-#define XEN_RESERVED_PML4S 16
-
-#define MAX_MEMORY_SIZE ((1ULL << (VIRTUAL_ADDRESS_BITS-1)) \
- -((GUEST_RESERVED_PML4S + XEN_RESERVED_PML4S) * PML4_SPACE))
-#define MAX_MEMORY_FRAMES (MAX_MEMORY_SIZE / XEN_PAGE_SIZE)
-
-/*
- * Virtual addresses beyond this are not modifiable by guest OSes.
- */
-#define HYPERVISOR_VIRT_START ESPACE_VIRT_START
-#define HYPERVISOR_VIRT_END (ESPACE_VIRT_END-(GUEST_RESERVED_PML4S * PML4_SPACE))
-
-/* First 512GB of virtual address space is used as a linear p.t. mapping. */
-#define LINEAR_PT_VIRT_START (HYPERVISOR_VIRT_START)
-#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + (PTE_SIZE * TOTAL_PTES))
-
-/* Reserve some space for a shadow PT mapping */
-#define SHADOW_PT_VIRT_START (LINEAR_PT_VIRT_END)
-#define SHADOW_PT_VIRT_END (SHADOW_PT_VIRT_START + (PTE_SIZE * TOTAL_PTES))
-
-/* Xen exists in the first 1GB of the next PML4 space */
-#define MAX_MONITOR_ADDRESS (1 * 1024 * 1024 * 1024)
-#define MONITOR_VIRT_START (SHADOW_PT_VIRT_END)
-#define MONITOR_VIRT_END (MONITOR_VIRT_START + MAX_MONITOR_ADDRESS)
-
-/* Next 1GB of virtual address space used for per-domain mappings (eg. GDT). */
-#define PERDOMAIN_VIRT_START (MONITOR_VIRT_END)
-#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + (512 * 512 * 4096))
-#define GDT_VIRT_START (PERDOMAIN_VIRT_START)
-#define GDT_VIRT_END (GDT_VIRT_START + (128*1024))
-#define LDT_VIRT_START (GDT_VIRT_END)
-#define LDT_VIRT_END (LDT_VIRT_START + (128*1024))
-
-/*
- * First set of MPTs are mapped read-only for all. It's for the machine->physical
- * mapping table (MPT table). The following are virtual addresses.
- */
-#define READONLY_MPT_VIRT_START (PERDOMAIN_VIRT_END)
-#define READONLY_MPT_VIRT_END (READONLY_MPT_VIRT_START + (PTE_SIZE * MAX_MEMORY_FRAMES))
-
-/* R/W machine->physical table */
-#define RDWR_MPT_VIRT_START (READONLY_MPT_VIRT_END)
-#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (PTE_SIZE * MAX_MEMORY_FRAMES))
-
-/* Frame table */
-#define FRAMETABLE_ENTRY_SIZE (48)
-#define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END)
-#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + (FRAMETABLE_ENTRY_SIZE * MAX_MEMORY_FRAMES))
-
-/* Next 1GB of virtual address space used for ioremap(). */
-#define IOREMAP_VIRT_START (FRAMETABLE_VIRT_END)
-#define IOREMAP_VIRT_END (IOREMAP_VIRT_START + (512 * 512 * 4096))
-
-/* And the virtual addresses for the direct-map region... */
-#define DIRECTMAP_VIRT_START (ESPACE_VIRT_START + (XEN_RESERVED_PML4S * PML4_SPACE))
-#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + MAX_DIRECTMAP_ADDRESS)
-
-/*
- * Next is the direct-mapped memory region. The following are machine addresses.
- */
-#define MAX_DMA_ADDRESS (16*1024*1024)
-#define MAX_DIRECTMAP_ADDRESS MAX_MEMORY_SIZE
-
-
-
-/*
- * Amount of slack domain memory to leave in system, in kilobytes.
- * Prevents a hard out-of-memory crunch for thinsg like network receive.
- */
-#define SLACK_DOMAIN_MEM_KILOBYTES 2048
-
-
-/*
- * These will probably change in the future..
- * locations for 32-bit guest compatibility mappings
- */
-
-/* 4M of 32-bit machine-physical shadow in low 4G of VM space */
-#define SHADOW_MPT32_VIRT_START (0xfc000000)
-#define SHADOW_MPT32_VIRT_END (SHADOW_MPT32_VIRT_START + (4 * 1024 * 1024))
-
-/* 44M of I/O remap for 32-bit drivers */
-#define IOREMAP_LOW_VIRT_START (SHADOW_MPT32_VIRT_END)
-#define IOREMAP_LOW_VIRT_END (IOREMAP_LOW_VIRT_START + (44 * 1024 * 1024))
-
-/* 4M of 32-bit page table */
-#define SHADOW_PT32_VIRT_START (IOREMAP_LOW_VIRT_END)
-#define SHADOW_PT32_VIRT_END (SHADOW_PT32_VIRT_START + (4 * 1024 * 1024))
-
-
-/* Linkage for x86 */
-#define FASTCALL(x) x __attribute__((regparm(3)))
-#define asmlinkage __attribute__((regparm(0)))
-#define __ALIGN .align 16,0x90
-#define __ALIGN_STR ".align 16,0x90"
-#define SYMBOL_NAME_STR(X) #X
-#define SYMBOL_NAME(X) X
-#define SYMBOL_NAME_LABEL(X) X##:
-#ifdef __ASSEMBLY__
-#define ALIGN __ALIGN
-#define ALIGN_STR __ALIGN_STR
-#define ENTRY(name) \
- .globl SYMBOL_NAME(name); \
- ALIGN; \
- SYMBOL_NAME_LABEL(name)
-#endif
-
-#define PGT_base_page_table PGT_l4_page_table
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-/*
- * Hypervisor segment selectors
- */
-#define __HYPERVISOR_CS64 0x0810
-#define __HYPERVISOR_CS32 0x0808
-#define __HYPERVISOR_DS 0x0818
-
-#define NR_syscalls 256
-
-#ifndef NDEBUG
-#define MEMORY_GUARD
-#endif
-
-#ifndef __ASSEMBLY__
-extern unsigned long _end; /* standard ELF symbol */
-extern void __out_of_line_bug(int line) __attribute__((noreturn));
-#define out_of_line_bug() __out_of_line_bug(__LINE__)
-#endif /* __ASSEMBLY__ */
-
-#endif /* __XEN_X86_64_CONFIG_H__ */
+++ /dev/null
-#ifndef X86_64_PDA_H
-#define X86_64_PDA_H
-
-#include <xen/cache.h>
-
-/* Per processor datastructure. %gs points to it while the kernel runs */
-/* To use a new field with the *_pda macros it needs to be added to tools/offset.c */
-struct x8664_pda {
- unsigned long kernelstack; /* TOS for current process */
- unsigned long oldrsp; /* user rsp for system call */
- unsigned long irqrsp; /* Old rsp for interrupts. */
- struct task_struct *pcurrent; /* Current process */
- int irqcount; /* Irq nesting counter. Starts with -1 */
- int cpunumber; /* Logical CPU number */
- /* XXX: could be a single list */
- unsigned long *pgd_quick;
- unsigned long *pmd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
- char *irqstackptr; /* top of irqstack */
- unsigned long volatile *level4_pgt;
-} ____cacheline_aligned;
-
-#define PDA_STACKOFFSET (5*8)
-
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-extern struct x8664_pda cpu_pda[];
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-#define sizeof_field(type,field) (sizeof(((type *)0)->field))
-#define typeof_field(type,field) typeof(((type *)0)->field)
-
-extern void __bad_pda_field(void);
-/* Don't use offsetof because it requires too much infrastructure */
-#define pda_offset(field) ((unsigned long)&((struct x8664_pda *)0)->field)
-
-#define pda_to_op(op,field,val) do { \
- switch (sizeof_field(struct x8664_pda, field)) { \
- case 2: asm volatile(op "w %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
- case 4: asm volatile(op "l %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
- case 8: asm volatile(op "q %0,%%gs:%P1" :: "r" (val), "i"(pda_offset(field)):"memory"); break; \
- default: __bad_pda_field(); \
- } \
- } while (0)
-
-
-#define pda_from_op(op,field) ({ \
- typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
- switch (sizeof_field(struct x8664_pda, field)) { \
- case 2: asm volatile(op "w %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
- case 4: asm volatile(op "l %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
- case 8: asm volatile(op "q %%gs:%P1,%0":"=r" (ret__): "i" (pda_offset(field)):"memory"); break; \
- default: __bad_pda_field(); \
- } \
- ret__; })
-
-
-#define read_pda(field) pda_from_op("mov",field)
-#define write_pda(field,val) pda_to_op("mov",field,val)
-#define add_pda(field,val) pda_to_op("add",field,val)
-#define sub_pda(field,val) pda_to_op("sub",field,val)
-
-#endif
+++ /dev/null
-/******************************************************************************
- * arch-i386/hypervisor-if.h
- *
- * Guest OS interface to x86 32-bit Xen.
- */
-
-#ifndef __HYPERVISOR_IF_I386_H__
-#define __HYPERVISOR_IF_I386_H__
-
-/*
- * Pointers and other address fields inside interface structures are padded to
- * 64 bits. This means that field alignments aren't different between 32- and
- * 64-bit architectures.
- */
-/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
-#define __MEMORY_PADDING(_X) u32 __pad_ ## _X
-#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
-#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
-
-/*
- * SEGMENT DESCRIPTOR TABLES
- */
-/*
- * A number of GDT entries are reserved by Xen. These are not situated at the
- * start of the GDT because some stupid OSes export hard-coded selector values
- * in their ABI. These hard-coded values are always near the start of the GDT,
- * so Xen places itself out of the way.
- *
- * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
- * and LAST_RESERVED_GDT_ENTRY are reserved).
- */
-#define NR_RESERVED_GDT_ENTRIES 40
-#define FIRST_RESERVED_GDT_ENTRY 256
-#define LAST_RESERVED_GDT_ENTRY \
- (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
-
-
-/*
- * These flat segments are in the Xen-private section of every GDT. Since these
- * are also present in the initial GDT, many OSes will be able to avoid
- * installing their own GDT.
- */
-#define FLAT_RING1_CS 0x0819 /* GDT index 259 */
-#define FLAT_RING1_DS 0x0821 /* GDT index 260 */
-#define FLAT_RING3_CS 0x082b /* GDT index 261 */
-#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
-
-#define FLAT_GUESTOS_CS FLAT_RING1_CS
-#define FLAT_GUESTOS_DS FLAT_RING1_DS
-#define FLAT_USER_CS FLAT_RING3_CS
-#define FLAT_USER_DS FLAT_RING3_DS
-
-/* And the trap vector is... */
-#define TRAP_INSTR "int $0x82"
-
-
-/*
- * Virtual addresses beyond this are not modifiable by guest OSes. The
- * machine->physical mapping table starts at this address, read-only.
- */
-#define HYPERVISOR_VIRT_START (0xFC000000UL)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
-
-#ifndef __ASSEMBLY__
-
-/* NB. Both the following are 32 bits each. */
-typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
-typedef unsigned long cpureg_t; /* Full-sized register. */
-
-/*
- * Send an array of these to HYPERVISOR_set_trap_table()
- */
-#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
-#define TI_GET_IF(_ti) ((_ti)->flags & 4)
-#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-typedef struct {
- u8 vector; /* 0: exception vector */
- u8 flags; /* 1: 0-3: privilege level; 4: clear event enable? */
- u16 cs; /* 2: code selector */
- memory_t address; /* 4: code address */
-} PACKED trap_info_t; /* 8 bytes */
-
-typedef struct
-{
- unsigned long ebx;
- unsigned long ecx;
- unsigned long edx;
- unsigned long esi;
- unsigned long edi;
- unsigned long ebp;
- unsigned long eax;
- unsigned long ds;
- unsigned long es;
- unsigned long fs;
- unsigned long gs;
- unsigned long _unused;
- unsigned long eip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long esp;
- unsigned long ss;
-} PACKED execution_context_t;
-
-typedef struct {
- u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
- u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
-} PACKED tsc_timestamp_t; /* 8 bytes */
-
-/*
- * The following is all CPU context. Note that the i387_ctxt block is filled
- * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
- */
-typedef struct {
-#define ECF_I387_VALID (1<<0)
- unsigned long flags;
- execution_context_t cpu_ctxt; /* User-level CPU registers */
- char fpu_ctxt[256]; /* User-level FPU registers */
- trap_info_t trap_ctxt[256]; /* Virtual IDT */
- unsigned int fast_trap_idx; /* "Fast trap" vector offset */
- unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
- unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
- unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
- unsigned long pt_base; /* CR3 (pagetable base) */
- unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
- unsigned long event_callback_cs; /* CS:EIP of event callback */
- unsigned long event_callback_eip;
- unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
- unsigned long failsafe_callback_eip;
-} PACKED full_execution_context_t;
-
-#define ARCH_HAS_FAST_TRAP
-
-#endif
-
-#endif
--- /dev/null
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_I386_H__
+#define __HYPERVISOR_IF_I386_H__
+
+/*
+ * Pointers and other address fields inside interface structures are padded to
+ * 64 bits. This means that field alignments aren't different between 32- and
+ * 64-bit architectures.
+ */
+/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
+#define __MEMORY_PADDING(_X) u32 __pad_ ## _X
+#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
+#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
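+
+/*
+ * Illustrative expansion: a field declared as
+ *     MEMORY_PADDING;
+ * on line 100 of a header becomes
+ *     u32 __pad_100;
+ * so every pad field gets a unique, line-derived name.
+ */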
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way.
+ *
+ * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
+ * and LAST_RESERVED_GDT_ENTRY are reserved).
+ */
+#define NR_RESERVED_GDT_ENTRIES 40
+#define FIRST_RESERVED_GDT_ENTRY 256
+#define LAST_RESERVED_GDT_ENTRY \
+ (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
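+/* With the values above, GDT entries 256..295 inclusive are reserved. */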
+
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0x0819 /* GDT index 259 */
+#define FLAT_RING1_DS 0x0821 /* GDT index 260 */
+#define FLAT_RING3_CS 0x082b /* GDT index 261 */
+#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
+
+#define FLAT_GUESTOS_CS FLAT_RING1_CS
+#define FLAT_GUESTOS_DS FLAT_RING1_DS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#define HYPERVISOR_VIRT_START (0xFC000000UL)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* NB. Both the following are 32 bits each. */
+typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
+typedef unsigned long cpureg_t; /* Full-sized register. */
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
+typedef struct {
+ u8 vector; /* 0: exception vector */
+ u8 flags; /* 1: 0-3: privilege level; 4: clear event enable? */
+ u16 cs; /* 2: code selector */
+ memory_t address; /* 4: code address */
+} PACKED trap_info_t; /* 8 bytes */
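+
+/*
+ * Usage sketch (illustrative; 'ti' and 'addr' are hypothetical locals):
+ *     trap_info_t ti;
+ *     ti.vector = 14; ti.flags = 0; ti.cs = FLAT_GUESTOS_CS; ti.address = addr;
+ *     TI_SET_DPL(&ti, 1);   entry callable from ring 1
+ *     TI_SET_IF(&ti, 1);    sets flag bit 2 (clear event enable)
+ */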
+
+typedef struct
+{
+ unsigned long ebx;
+ unsigned long ecx;
+ unsigned long edx;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned long ebp;
+ unsigned long eax;
+ unsigned long ds;
+ unsigned long es;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned long _unused;
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long esp;
+ unsigned long ss;
+} PACKED execution_context_t;
+
+typedef struct {
+ u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
+ u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
+} PACKED tsc_timestamp_t; /* 8 bytes */
+
+/*
+ * The following is all CPU context. Note that the i387_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+typedef struct {
+#define ECF_I387_VALID (1<<0)
+ unsigned long flags;
+ execution_context_t cpu_ctxt; /* User-level CPU registers */
+ char fpu_ctxt[256]; /* User-level FPU registers */
+ trap_info_t trap_ctxt[256]; /* Virtual IDT */
+ unsigned int fast_trap_idx; /* "Fast trap" vector offset */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
+ unsigned long pt_base; /* CR3 (pagetable base) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+} PACKED full_execution_context_t;
+
+#define ARCH_HAS_FAST_TRAP
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __HYPERVISOR_IF_I386_H__ */
--- /dev/null
+/******************************************************************************
+ * arch-x86_64.h
+ *
+ * Guest OS interface to AMD x86-64 Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_X86_64_H__
+#define __HYPERVISOR_IF_X86_64_H__
+
+/* Pointers are naturally 64 bits in this architecture; no padding needed. */
+#define _MEMORY_PADDING(_X)
+#define MEMORY_PADDING
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way.
+ *
+ * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
+ * and LAST_RESERVED_GDT_ENTRY are reserved).
+ */
+#define NR_RESERVED_GDT_ENTRIES 40
+#define FIRST_RESERVED_GDT_ENTRY 256
+#define LAST_RESERVED_GDT_ENTRY \
+ (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0x0823 /* GDT index 260 */
+#define FLAT_RING3_CS64 0x082b /* GDT index 261 */
+#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
+
+#define FLAT_GUESTOS_DS FLAT_RING3_DS
+#define FLAT_GUESTOS_CS FLAT_RING3_CS64
+#define FLAT_GUESTOS_CS32 FLAT_RING3_CS32
+
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_CS FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+
+/* And the trap vector is... */
+#define TRAP_INSTR "syscall"
+
+/* The machine->physical mapping table starts at this address, read-only. */
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)0xffff810000000000ULL)
+#endif
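+
+/*
+ * Usage sketch (illustrative; 'mfn' is a hypothetical machine frame number):
+ *     unsigned long pfn = machine_to_phys_mapping[mfn];
+ */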
+
+#ifndef __ASSEMBLY__
+
+/* NB. Both the following are 64 bits each. */
+typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
+typedef unsigned long cpureg_t; /* Full-sized register. */
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
+typedef struct {
+ u8 vector; /* 0: exception vector */
+ u8 flags; /* 1: 0-3: privilege level; 4: clear event enable? */
+ u16 cs; /* 2: code selector */
+ u32 __pad; /* 4 */
+ memory_t address; /* 8: code address */
+} PACKED trap_info_t; /* 16 bytes */
+
+typedef struct
+{
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+} PACKED execution_context_t;
+
+/*
+ * NB. This may become a 64-bit count with no shift. If this happens then the
+ * structure size will still be 8 bytes, so no other alignments will change.
+ */
+typedef struct {
+ u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
+ u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
+} PACKED tsc_timestamp_t; /* 8 bytes */
+
+/*
+ * The following is all CPU context. Note that the i387_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+typedef struct {
+#define ECF_I387_VALID (1<<0)
+ unsigned long flags;
+ execution_context_t cpu_ctxt; /* User-level CPU registers */
+ char fpu_ctxt[512]; /* User-level FPU registers */
+ trap_info_t trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
+ unsigned long pt_base; /* CR3 (pagetable base) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+} PACKED full_execution_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __HYPERVISOR_IF_X86_64_H__ */
+++ /dev/null
-/******************************************************************************
- * arch-x86_64/hypervisor-if.h
- *
- * Guest OS interface to AMD x86-64 bit Xen.
- */
-
-#ifndef __HYPERVISOR_IF_X86_64_H__
-#define __HYPERVISOR_IF_X86_64_H__
-
-/* Pointers are naturally 64 bits in this architecture; no padding needed. */
-#define MEMORY_PADDING() ((void)0)
-
-/*
- * SEGMENT DESCRIPTOR TABLES
- */
-/*
- * A number of GDT entries are reserved by Xen. These are not situated at the
- * start of the GDT because some stupid OSes export hard-coded selector values
- * in their ABI. These hard-coded values are always near the start of the GDT,
- * so Xen places itself out of the way.
- *
- * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
- * and LAST_RESERVED_GDT_ENTRY are reserved).
- */
-#define NR_RESERVED_GDT_ENTRIES 40
-#define FIRST_RESERVED_GDT_ENTRY 256
-#define LAST_RESERVED_GDT_ENTRY \
- (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
-
-/*
- * 64-bit segment selectors
- * These flat segments are in the Xen-private section of every GDT. Since these
- * are also present in the initial GDT, many OSes will be able to avoid
- * installing their own GDT.
- */
-
-#define FLAT_RING3_CS32 0x0823 /* GDT index 260 */
-#define FLAT_RING3_CS64 0x082b /* GDT index 261 */
-#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
-
-#define FLAT_GUESTOS_DS FLAT_RING3_DS
-#define FLAT_GUESTOS_CS FLAT_RING3_CS64
-#define FLAT_GUESTOS_CS32 FLAT_RING3_CS32
-
-#define FLAT_USER_DS FLAT_RING3_DS
-#define FLAT_USER_CS FLAT_RING3_CS64
-#define FLAT_USER_CS32 FLAT_RING3_CS32
-
-/* And the trap vector is... */
-#define TRAP_INSTR "syscall"
-
-/* The machine->physical mapping table starts at this address, read-only. */
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)0xffff810000000000ULL)
-#endif
-
-#ifndef __ASSEMBLY__
-
-/* NB. Both the following are 64 bits each. */
-typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
-typedef unsigned long cpureg_t; /* Full-sized register. */
-
-/*
- * Send an array of these to HYPERVISOR_set_trap_table()
- */
-#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
-#define TI_GET_IF(_ti) ((_ti)->flags & 4)
-#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-typedef struct {
- u8 vector; /* 0: exception vector */
- u8 flags; /* 1: 0-3: privilege level; 4: clear event enable? */
- u16 cs; /* 2: code selector */
- u32 __pad; /* 4 */
- memory_t address; /* 8: code address */
-} PACKED trap_info_t; /* 16 bytes */
-
-typedef struct
-{
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-} PACKED execution_context_t;
-
-/*
- * NB. This may become a 64-bit count with no shift. If this happens then the
- * structure size will still be 8 bytes, so no other alignments will change.
- */
-typedef struct {
- u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
- u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
-} PACKED tsc_timestamp_t; /* 8 bytes */
-
-/*
- * The following is all CPU context. Note that the i387_ctxt block is filled
- * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
- */
-typedef struct {
-#define ECF_I387_VALID (1<<0)
- unsigned long flags;
- execution_context_t cpu_ctxt; /* User-level CPU registers */
- char fpu_ctxt[512]; /* User-level FPU registers */
- trap_info_t trap_ctxt[256]; /* Virtual IDT */
- unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
- unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
- unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
- unsigned long pt_base; /* CR3 (pagetable base) */
- unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
- unsigned long event_callback_cs; /* CS:EIP of event callback */
- unsigned long event_callback_eip;
- unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
- unsigned long failsafe_callback_eip;
-} PACKED full_execution_context_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __HYPERVISOR_IF_H__ */
/* GCC-specific way to pack structure definitions (no implicit padding). */
#define PACKED __attribute__ ((packed))
-#include "arch/hypervisor-if.h"
+#if defined(__i386__)
+#include "arch-x86_32.h"
+#elif defined(__x86_64__)
+#include "arch-x86_64.h"
+#else
+#error "Unsupported architecture"
+#endif
/*
* HYPERVISOR "SYSTEM CALLS"