((1UL << (PAGE_SHIFT - 3)) - 1)))
static unsigned long table_size;
-static int opt_contig_mem = 0;
+static bool_t __read_mostly opt_contig_mem;
boolean_param("contig_mem", opt_contig_mem);
#else
#define opt_contig_mem 1
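Note: the conversions throughout this patch lean on Xen's option machinery.
bool_t is the hypervisor's one-byte boolean (typedef char bool_t; in the arch
headers), and every *_param() macro registers its variable for cmdline_parse()
in xen/common/kernel.c (patched further down). A simplified sketch of the
registration record, with fields named after the uses visible in that hunk
(param->type, param->var, param->len); the real macro definitions differ in
detail:

    struct kernel_param {
        const char *name;                   /* command-line key, e.g. "nosmp" */
        enum { OPT_STR, OPT_UINT, OPT_BOOL,
               OPT_INVBOOL, OPT_SIZE, OPT_CUSTOM } type;
        void *var;                          /* option variable (or handler)   */
        unsigned int len;                   /* sizeof the variable            */
    };

    /* boolean_param("nosmp", opt_nosmp) drops one record into a dedicated
     * linker section that cmdline_parse() walks at boot: */
    #define boolean_param(_name, _var)                              \
        static const struct kernel_param __setup_##_var             \
            __attribute__((__used__, __section__(".setup"))) =      \
            { _name, OPT_BOOL, &(_var), sizeof(_var) }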
extern void xen_patch_kernel(void);
/* nosmp: ignore secondary processors */
-static int __initdata opt_nosmp;
+static bool_t __initdata opt_nosmp;
boolean_param("nosmp", opt_nosmp);
/* maxcpus: maximum number of CPUs to activate */
integer_param("xencons", opt_xencons);
/* xencons_poll: toggle non-legacy xencons UARTs to run in polling mode */
-static int __initdata opt_xencons_poll;
+static bool_t __initdata opt_xencons_poll;
boolean_param("xencons_poll", opt_xencons_poll);
#define XENHEAP_DEFAULT_SIZE KERNEL_TR_PAGE_SIZE
};
/* efi_print: print efi table at boot */
-static int __initdata opt_efi_print;
+static bool_t __initdata opt_efi_print;
boolean_param("efi_print", opt_efi_print);
/* print EFI memory map: */
#define PREFIX "ACPI: "
#ifdef CONFIG_ACPI_PCI
-int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
-int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
+bool_t __initdata acpi_noirq; /* skip ACPI IRQ initialization */
+bool_t __initdata acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
#else
-int acpi_noirq __initdata = 1;
-int acpi_pci_disabled __initdata = 1;
+bool_t __initdata acpi_noirq = 1;
+bool_t __initdata acpi_pci_disabled = 1;
#endif
-int acpi_ht __initdata = 1; /* enable HT */
+bool_t __initdata acpi_ht = 1; /* enable HT */
-int acpi_lapic;
-int acpi_ioapic;
-int acpi_strict;
-EXPORT_SYMBOL(acpi_strict);
+bool_t __initdata acpi_lapic;
+bool_t __initdata acpi_ioapic;
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
-int acpi_skip_timer_override __initdata;
+bool_t acpi_skip_timer_override __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
return;
}
-extern int acpi_force;
-
#ifdef __i386__
static int __init disable_acpi_irq(struct dmi_system_id *d)
static void (*pm_idle_save) (void) __read_mostly;
unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER - 1;
integer_param("max_cstate", max_cstate);
-static int local_apic_timer_c2_ok __read_mostly = 0;
+static bool_t __read_mostly local_apic_timer_c2_ok;
boolean_param("lapic_timer_c2_ok", local_apic_timer_c2_ok);
static struct acpi_processor_power *__read_mostly processor_powers[NR_CPUS];
#include <mach_apic.h>
#include <io_ports.h>
-static int tdt_enabled __read_mostly;
-static int tdt_enable __initdata = 1;
+static bool_t tdt_enabled __read_mostly;
+static bool_t tdt_enable __initdata = 1;
boolean_param("tdt", tdt_enable);
static struct {
*/
int apic_verbosity;
-static int opt_x2apic = 1;
+static bool_t __initdata opt_x2apic = 1;
boolean_param("x2apic", opt_x2apic);
-int x2apic_enabled __read_mostly = 0;
-int directed_eoi_enabled __read_mostly = 0;
+bool_t __read_mostly x2apic_enabled = 0;
+bool_t __read_mostly directed_eoi_enabled = 0;
/*
* The following vectors are part of the Linux architecture, there
/* Using APIC to generate smp_local_timer_interrupt? */
static bool_t __read_mostly using_apic_timer;
-static int enabled_via_apicbase;
+static bool_t __read_mostly enabled_via_apicbase;
void enable_NMI_through_LVT0 (void * dummy)
{
* amd_flush_filter={on,off}. Forcibly enable or disable the TLB flush
* filter on AMD 64-bit processors.
*/
-static int __read_mostly flush_filter_force;
+static s8 __devinitdata flush_filter_force;
static void __init flush_filter(char *s)
{
switch (parse_bool(s))
#define disable_pse 0
static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_fxsr __cpuinitdata;
-static int disable_x86_serial_nr __cpuinitdata;
+size_param("cachesize", cachesize_override);
+static bool_t __cpuinitdata disable_x86_fxsr;
+boolean_param("nofxsr", disable_x86_fxsr);
+static bool_t __cpuinitdata disable_x86_serial_nr;
+boolean_param("noserialnumber", disable_x86_serial_nr);
-static int use_xsave;
+static bool_t __cpuinitdata use_xsave;
boolean_param("xsave", use_xsave);
unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
};
static struct cpu_dev * this_cpu = &default_cpu;
-integer_param("cachesize", cachesize_override);
-
-int __cpuinitdata opt_cpu_info;
+bool_t __cpuinitdata opt_cpu_info;
boolean_param("cpuinfo", opt_cpu_info);
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
}
-boolean_param("nofxsr", disable_x86_fxsr);
-
-
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(unsigned long flag)
{
}
}
-boolean_param("noserialnumber", disable_x86_serial_nr);
-
-
/*
* This does the hard work of actually picking apart the CPU stuff...
#include "mce.h"
-int mce_disabled;
+bool_t __read_mostly mce_disabled;
invbool_param("mce", mce_disabled);
-int is_mc_panic;
-unsigned int nr_mce_banks;
-
-int mce_broadcast = 0;
-int firstbank;
+bool_t is_mc_panic;
+unsigned int __read_mostly nr_mce_banks;
+bool_t __read_mostly mce_broadcast = 0;
+int __read_mostly firstbank;
static void intpose_init(void);
static void mcinfo_clear(struct mc_info *);
DECLARE_PER_CPU(struct mca_banks *, poll_bankmask);
DECLARE_PER_CPU(struct mca_banks *, no_cmci_banks);
-extern int cmci_support;
-extern int ser_support;
-extern int is_mc_panic;
-extern int mce_broadcast;
+extern bool_t cmci_support;
+extern bool_t is_mc_panic;
+extern bool_t mce_broadcast;
extern void mcheck_mca_clearbanks(struct mca_banks *);
extern mctelem_cookie_t mcheck_mca_logout(enum mca_source, struct mca_banks *,
DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
DEFINE_PER_CPU(struct mca_banks *, mce_clear_banks);
-int cmci_support = 0;
-int ser_support = 0;
-static int mce_force_broadcast;
+bool_t __read_mostly cmci_support = 0;
+static bool_t __read_mostly ser_support = 0;
+static bool_t __read_mostly mce_force_broadcast;
boolean_param("mce_fb", mce_force_broadcast);
static int nr_intel_ext_msrs = 0;
/* Check and init MCA */
static void intel_init_mca(struct cpuinfo_x86 *c)
{
- int broadcast, cmci=0, ser=0, ext_num = 0, first;
+ bool_t broadcast, cmci = 0, ser = 0;
+ int ext_num = 0, first;
uint64_t msr_content;
broadcast = mce_is_broadcast(c);
};
/* Global variables */
-extern int mce_disabled;
+extern bool_t mce_disabled;
extern unsigned int nr_mce_banks;
#endif /* X86_MCA_H */
#ifdef CONFIG_ACPI_BOOT
-extern int acpi_force;
-
static __init __attribute__((unused)) int dmi_disable_acpi(struct dmi_blacklist *d)
{
if (!acpi_force) {
return alloc_vcpu(dom0, 0, 0);
}
-static unsigned int __initdata opt_dom0_shadow;
+static bool_t __initdata opt_dom0_shadow;
boolean_param("dom0_shadow", opt_dom0_shadow);
static char __initdata opt_dom0_ioports_disable[200] = "";
size_param("availmem", opt_availmem);
/* e820_mtrr_clip: Clip RAM to the highest cacheable MTRR. */
-static int __initdata e820_mtrr_clip = -1;
+static s8 __initdata e820_mtrr_clip = -1;
boolean_param("e820-mtrr-clip", e820_mtrr_clip);
/* e820_verbose: Be verbose about clipping, the original e820, &c */
-static int __initdata e820_verbose;
+static bool_t __initdata e820_verbose;
boolean_param("e820-verbose", e820_verbose);
struct e820map e820;
/* Conservative estimate of top-of-RAM by looking for MTRR WB regions. */
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
-static uint64_t mtrr_top_of_ram(void)
+static uint64_t __init mtrr_top_of_ram(void)
{
uint32_t eax, ebx, ecx, edx;
uint64_t mtrr_cap, mtrr_def, addr_mask, base, mask, top;
#include <xen/smp.h>
#include <asm/mach-default/mach_mpparse.h>
-static int __initdata x2apic_phys; /* By default we use logical cluster mode. */
+static bool_t __initdata x2apic_phys; /* By default we use logical cluster mode. */
boolean_param("x2apic_phys", x2apic_phys);
static void init_apic_ldr_x2apic_phys(void)
* if RTC interrupts are enabled. Enable this option to always enable
* legacy HPET broadcast for deep C-states.
*/
-int force_hpet_broadcast;
+static bool_t __read_mostly force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);
/*
#include <public/version.h>
#include <public/memory.h>
-int hvm_enabled __read_mostly;
+bool_t __read_mostly hvm_enabled;
unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);
#include <xen/bitmap.h>
#include <asm/hvm/support.h>
-int __read_mostly hvm_port80_allowed = -1;
+s8 __read_mostly hvm_port80_allowed = -1;
boolean_param("hvm_port80", hvm_port80_allowed);
static int __init dmi_hvm_deny_port80(/*const*/ struct dmi_system_id *id)
#include <asm/shadow.h>
#include <asm/tboot.h>
-static int __read_mostly opt_vpid_enabled = 1;
+static bool_t __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
-static int __read_mostly opt_unrestricted_guest_enabled = 1;
+static bool_t __read_mostly opt_unrestricted_guest_enabled = 1;
boolean_param("unrestricted_guest", opt_unrestricted_guest_enabled);
/*
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
-static int __read_mostly opt_vpmu_enabled;
+static bool_t __read_mostly opt_vpmu_enabled;
boolean_param("vpmu", opt_vpmu_enabled);
int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
static DEFINE_SPINLOCK(ioapic_lock);
-int skip_ioapic_setup;
+bool_t __read_mostly skip_ioapic_setup;
#ifndef sis_apic_bug
/*
int pin, ioapic, irq, irq_entry;
struct irq_cfg *cfg;
- if (skip_ioapic_setup == 1)
+ if (skip_ioapic_setup)
return;
for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
#include <public/physdev.h>
/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
-int __read_mostly opt_noirqbalance = 0;
+bool_t __read_mostly opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);
unsigned int __read_mostly nr_irqs_gsi = 16;
#define PTE_UPDATE_WITH_CMPXCHG
#endif
-int mem_hotplug = 0;
+bool_t __read_mostly mem_hotplug = 0;
/* Private domain structs for DOMID_XEN and DOMID_IO. */
struct domain *dom_xen, *dom_io, *dom_cow;
#define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
-int opt_allow_superpage;
+bool_t __read_mostly opt_allow_superpage;
boolean_param("allowsuperpage", opt_allow_superpage);
#ifdef __i386__
#define P2M_DEBUGGING 0
/* turn on/off 1GB host page table support for hap */
-static int opt_hap_1gb = 0;
+static bool_t __read_mostly opt_hap_1gb;
boolean_param("hap_1gb", opt_hap_1gb);
/* Printouts */
case XENPF_platform_quirk:
{
- extern int opt_noirqbalance;
int quirk_id = op->u.platform_quirk.quirk_id;
ret = xsm_platform_quirk(quirk_id);
extern struct boot_video_info boot_vid_info;
/* opt_nosmp: If true, secondary processors are ignored. */
-static int __initdata opt_nosmp = 0;
+static bool_t __initdata opt_nosmp;
boolean_param("nosmp", opt_nosmp);
/* maxcpus: maximum number of CPUs to activate. */
integer_param("maxcpus", max_cpus);
/* opt_watchdog: If true, run a watchdog NMI on each processor. */
-static int __initdata opt_watchdog = 0;
+static bool_t __initdata opt_watchdog;
boolean_param("watchdog", opt_watchdog);
/* **** Linux config option: propagated to domain0. */
/* **** Linux config option: propagated to domain0. */
/* xen_cpuidle: Xen controls CPU C-states. */
-/*static*/ int xen_cpuidle = -1;
+s8 __read_mostly xen_cpuidle = -1;
boolean_param("cpuidle", xen_cpuidle);
-int early_boot = 1;
+bool_t __read_mostly early_boot = 1;
cpumask_t __read_mostly cpu_present_map;
unsigned long __read_mostly mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-int __read_mostly acpi_disabled;
-
-int __read_mostly acpi_force;
+bool_t __initdata acpi_disabled;
+bool_t __initdata acpi_force;
static char __initdata acpi_param[10] = "";
static void __init parse_acpi_param(char *s)
{
acpi_ht = 1;
acpi_disabled = 0;
}
- else if ( !strcmp(s, "strict") )
- {
- acpi_strict = 1;
- }
else if ( !strcmp(s, "ht") )
{
if ( !acpi_force )
* cpu_mask that denotes the CPUs that need timer interrupts coming in as
* IPIs in place of local APIC timers
*/
-extern int xen_cpuidle;
static cpumask_t pit_broadcast_mask;
static void smp_send_timer_broadcast_ipi(void)
static int debug_stack_lines = 20;
integer_param("debug_stack_lines", debug_stack_lines);
-static int opt_ler;
+static bool_t __devinitdata opt_ler;
boolean_param("ler", opt_ler);
#ifdef CONFIG_X86_32
unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;
/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
-static unsigned int opt_dom0_vcpus_pin;
+static bool_t opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);
/* set xen as default cpufreq */
xen_commandline_t saved_cmdline;
+static void __init assign_integer_param(
+ struct kernel_param *param, uint64_t val)
+{
+ switch ( param->len )
+ {
+ case sizeof(uint8_t):
+ *(uint8_t *)param->var = val;
+ break;
+ case sizeof(uint16_t):
+ *(uint16_t *)param->var = val;
+ break;
+ case sizeof(uint32_t):
+ *(uint32_t *)param->var = val;
+ break;
+ case sizeof(uint64_t):
+ *(uint64_t *)param->var = val;
+ break;
+ default:
+ BUG();
+ }
+}
+
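Note: the switch on param->len above is what makes the bool_t conversions in
this patch safe. The code it replaces stored through *(int *)param->var, a
four-byte write even when the destination is a one-byte bool_t or s8. A
self-contained illustration of the hazard (the adjacent layout is
hypothetical, but plausible for two consecutive .data objects):

    #include <stdint.h>

    typedef char bool_t;                /* Xen-style one-byte boolean       */

    static bool_t opt_a;                /* one byte ...                     */
    static bool_t opt_b;                /* ... likely the very next byte    */

    static void broken_assign(void *var)
    {
        *(int *)var = 1;                /* 4-byte store: can clobber opt_b  */
    }

    static void safe_assign(void *var, unsigned int len)
    {
        if ( len == sizeof(uint8_t) )
            *(uint8_t *)var = 1;        /* touches only opt_a's byte        */
    }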
void __init cmdline_parse(char *cmdline)
{
char opt[100], *optval, *optkey, *q;
strlcpy(param->var, optval, param->len);
break;
case OPT_UINT:
- *(unsigned int *)param->var = simple_strtol(optval, NULL, 0);
+ assign_integer_param(
+ param,
+ simple_strtoll(optval, NULL, 0));
break;
case OPT_BOOL:
case OPT_INVBOOL:
if ( !parse_bool(optval) )
bool_assert = !bool_assert;
- if ( param->type == OPT_INVBOOL )
- bool_assert = !bool_assert;
- *(int *)param->var = bool_assert;
+ assign_integer_param(
+ param,
+ (param->type == OPT_BOOL) == bool_assert);
break;
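Note: (param->type == OPT_BOOL) == bool_assert folds the deleted OPT_INVBOOL
inversion branch into a single store; bool_assert arrives here already flipped
for a "no-" key prefix (handled earlier in cmdline_parse()) and is flipped
again above for a negative value string. A standalone check of the truth
table, with stand-in OPT_* values:

    #include <assert.h>

    enum { OPT_BOOL, OPT_INVBOOL };

    static int param_value(int type, int bool_assert)
    {
        return (type == OPT_BOOL) == bool_assert;
    }

    int main(void)
    {
        assert(param_value(OPT_BOOL,    1) == 1);  /* "x"     -> var = 1    */
        assert(param_value(OPT_BOOL,    0) == 0);  /* "x=off" -> var = 0    */
        assert(param_value(OPT_INVBOOL, 1) == 0);  /* "mce"     -> mce_disabled = 0 */
        assert(param_value(OPT_INVBOOL, 0) == 1);  /* "mce=off" -> mce_disabled = 1 */
        return 0;
    }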
- case OPT_SIZE: {
- uint64_t sz = parse_size_and_unit(optval, NULL);
- switch ( param->len )
- {
- case sizeof(uint32_t):
- *(uint32_t *)param->var = sz;
- break;
- case sizeof(uint64_t):
- *(uint64_t *)param->var = sz;
- break;
- default:
- BUG();
- }
+ case OPT_SIZE:
+ assign_integer_param(
+ param,
+ parse_size_and_unit(optval, NULL));
break;
- }
case OPT_CUSTOM:
((void (*)(const char *))param->var)(optval);
break;
+ default:
+ BUG();
+ break;
}
}
}
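Note: OPT_CUSTOM (used by e.g. custom_param("ats", parse_ats_param) later in
this patch) bypasses assign_integer_param() entirely: cmdline_parse() casts
param->var to a function pointer and hands it the raw value string. A hedged
sketch of such a handler, modelled on the flush_filter() hunk above (option
name and variable are hypothetical):

    static s8 __initdata example_force; /* -1 = force off, 0 = default, 1 = force on */

    static void __init parse_example_param(char *s)
    {
        switch ( parse_bool(s) )        /* 1 / 0, or -1 if unrecognised */
        {
        case 1:  example_force =  1; break;  /* "example=on"            */
        case 0:  example_force = -1; break;  /* "example=off"           */
        default:                     break;  /* leave the default alone */
        }
    }
    custom_param("example", parse_example_param);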
/*
* no-bootscrub -> Free pages are not zeroed during boot.
*/
-static int opt_bootscrub __initdata = 1;
+static bool_t opt_bootscrub __initdata = 1;
boolean_param("bootscrub", opt_bootscrub);
/*
/*
* Boot parameters
*/
-int sched_credit_default_yield = 0;
+static bool_t __read_mostly sched_credit_default_yield;
boolean_param("sched_credit_default_yield", sched_credit_default_yield);
/*
svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
}
-static int opt_tickle_one_idle __read_mostly = 1;
+static bool_t __read_mostly opt_tickle_one_idle = 1;
boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
* scheduler will give preference to a partially idle package over a fully
* idle package when picking a pCPU on which to schedule a vCPU.
*/
-int sched_smt_power_savings = 0;
+bool_t sched_smt_power_savings = 0;
boolean_param("sched_smt_power_savings", sched_smt_power_savings);
/* Various timer handlers. */
#include <public/sched.h>
/* opt_noreboot: If true, machine will need manual reset on error. */
-int opt_noreboot;
+bool_t __read_mostly opt_noreboot;
boolean_param("noreboot", opt_noreboot);
static void maybe_reboot(void)
#define EXPORT /* indicates code other modules are dependent upon */
-EXPORT int opt_tmem = 1;
+EXPORT bool_t __read_mostly opt_tmem = 1;
boolean_param("tmem", opt_tmem);
-EXPORT int opt_tmem_compress = 0;
+EXPORT bool_t __read_mostly opt_tmem_compress = 0;
boolean_param("tmem_compress", opt_tmem_compress);
-EXPORT int opt_tmem_dedup = 0;
+EXPORT bool_t __read_mostly opt_tmem_dedup = 0;
boolean_param("tmem_dedup", opt_tmem_dedup);
-EXPORT int opt_tmem_tze = 0;
+EXPORT bool_t __read_mostly opt_tmem_tze = 0;
boolean_param("tmem_tze", opt_tmem_tze);
-EXPORT int opt_tmem_shared_auth = 0;
+EXPORT bool_t __read_mostly opt_tmem_shared_auth = 0;
boolean_param("tmem_shared_auth", opt_tmem_shared_auth);
-EXPORT int opt_tmem_lock = 0;
+EXPORT int __read_mostly opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);
EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0);
#define FIRMWARE_TIMEOUT (1 * 1000)
#define FIRMWARE_MAX_STALL 50 /* 50us */
-static struct acpi_table_erst *erst_tab;
-static int erst_enabled;
+static struct acpi_table_erst *__read_mostly erst_tab;
+static bool_t __read_mostly erst_enabled;
/* ERST Error Log Address Range attributes */
#define ERST_RANGE_RESERVED 0x0001
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
-int acpi_specific_hotkey_enabled = TRUE;
-EXPORT_SYMBOL(acpi_specific_hotkey_enabled);
-
void acpi_os_printf(const char *fmt, ...)
{
va_list args;
string_param("conswitch", opt_conswitch);
/* sync_console: force synchronous console output (useful for debugging). */
-static int __read_mostly opt_sync_console;
+static bool_t __read_mostly opt_sync_console;
boolean_param("sync_console", opt_sync_console);
/* console_to_ring: send guest (incl. dom 0) console data to console ring. */
-static int __read_mostly opt_console_to_ring;
+static bool_t __read_mostly opt_console_to_ring;
boolean_param("console_to_ring", opt_console_to_ring);
/* console_timestamps: include a timestamp prefix on every Xen console line. */
-static int __read_mostly opt_console_timestamps;
+static bool_t __read_mostly opt_console_timestamps;
boolean_param("console_timestamps", opt_console_timestamps);
/* conring_size: allows a larger console ring than the default (16kB). */
#include "dmar.h"
#include <xen/keyhandler.h>
-extern int qinval_enabled;
-extern int ats_enabled;
+extern bool_t ats_enabled;
extern bool_t rwbf_quirk;
void print_iommu_regs(struct acpi_drhd_unit *drhd);
struct pci_ats_dev;
-int ats_enabled = 0;
+bool_t __read_mostly ats_enabled = 0;
struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
{
static void parse_ats_param(char *s);
custom_param("ats", parse_ats_param);
-int __read_mostly ats_enabled = 1;
+bool_t __read_mostly ats_enabled = 1;
static void __init parse_ats_param(char *s)
{
* iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0's
* 1:1 IOMMU mappings, except Xen and unusable regions.
*/
-static int __read_mostly iommu_inclusive_mapping = 1;
+static bool_t __initdata iommu_inclusive_mapping = 1;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
void *map_vtd_domain_page(u64 maddr)
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
-extern int acpi_lapic;
-extern int acpi_ioapic;
-extern int acpi_noirq;
-extern int acpi_strict;
-extern int acpi_disabled;
-extern int acpi_ht;
-extern int acpi_pci_disabled;
-extern int acpi_skip_timer_override;
-extern int acpi_use_timer_override;
+extern bool_t acpi_lapic, acpi_ioapic, acpi_noirq;
+extern bool_t acpi_force, acpi_ht;
+extern bool_t acpi_disabled, acpi_pci_disabled;
+extern bool_t acpi_skip_timer_override;
extern u32 acpi_smi_cmd;
extern u8 acpi_enable_value, acpi_disable_value;
extern u8 acpi_sci_flags;
#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
extern int apic_verbosity;
-extern int x2apic_enabled;
-extern int directed_eoi_enabled;
+extern bool_t x2apic_enabled;
+extern bool_t directed_eoi_enabled;
void x2apic_bsp_setup(void);
void x2apic_ap_setup(void);
};
extern struct hvm_function_table hvm_funcs;
-extern int hvm_enabled;
+extern bool_t hvm_enabled;
extern bool_t cpu_has_lmsl;
int hvm_domain_initialise(struct domain *d);
}
/* 1 if "noapic" boot option passed */
-extern int skip_ioapic_setup;
+extern bool_t skip_ioapic_setup;
#ifdef CONFIG_ACPI_BOOT
extern int io_apic_get_unique_id (int ioapic, int apic_id);
extern u8 *irq_vector;
+extern bool_t opt_noirqbalance;
+
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack
int check_descriptor(const struct domain *, struct desc_struct *d);
-extern int opt_allow_superpage;
-extern int mem_hotplug;
+extern bool_t opt_allow_superpage;
+extern bool_t mem_hotplug;
/******************************************************************************
* With shadow pagetables, the different kinds of address start
extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
-extern int opt_cpu_info;
+extern bool_t opt_cpu_info;
/* Maximum width of physical addresses supported by the hardware */
extern unsigned int paddr_bits;
#include <xen/multiboot.h>
-extern int early_boot;
+extern bool_t early_boot;
+extern s8 xen_cpuidle;
extern unsigned long xenheap_initial_phys_start;
void init_done(void);
}
#endif
-extern int pnpacpi_disabled;
-
void acpi_reboot(void);
void acpi_dmar_zap(void);
void set_vcpu_migration_delay(unsigned int delay);
unsigned int get_vcpu_migration_delay(void);
-extern int sched_smt_power_savings;
+extern bool_t sched_smt_power_savings;
extern enum cpufreq_controller {
FREQCTL_none, FREQCTL_dom0_kernel, FREQCTL_xen
#define __XEN_SHUTDOWN_H__
/* opt_noreboot: If true, machine will need manual reset on error. */
-extern int opt_noreboot;
+extern bool_t opt_noreboot;
void dom0_shutdown(u8 reason);
extern void tmh_release_avail_pages_to_host(void);
extern void tmh_scrub_page(struct page_info *pi, unsigned int memflags);
-extern int opt_tmem_compress;
-static inline int tmh_compression_enabled(void)
+extern bool_t opt_tmem_compress;
+static inline bool_t tmh_compression_enabled(void)
{
return opt_tmem_compress;
}
-extern int opt_tmem_dedup;
-static inline int tmh_dedup_enabled(void)
+extern bool_t opt_tmem_dedup;
+static inline bool_t tmh_dedup_enabled(void)
{
return opt_tmem_dedup;
}
-extern int opt_tmem_tze;
-static inline int tmh_tze_enabled(void)
+extern bool_t opt_tmem_tze;
+static inline bool_t tmh_tze_enabled(void)
{
return opt_tmem_tze;
}
opt_tmem_tze = 0;
}
-extern int opt_tmem_shared_auth;
-static inline int tmh_shared_auth(void)
+extern bool_t opt_tmem_shared_auth;
+static inline bool_t tmh_shared_auth(void)
{
return opt_tmem_shared_auth;
}
-extern int opt_tmem;
-static inline int tmh_enabled(void)
+extern bool_t opt_tmem;
+static inline bool_t tmh_enabled(void)
{
return opt_tmem;
}
extern int opt_tmem_lock;
-extern int opt_tmem_flush_dups;
-
/*
* Memory free page list management
*/
}
#define tmh_lock_all opt_tmem_lock
-#define tmh_flush_dups opt_tmem_flush_dups
#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
/* "Client" (==domain) abstraction */