domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_CONTROL;
- op->enable = enable;
+ op->u.enable = enable;
return do_domctl(xc_handle, &domctl);
}
domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN;
- op->nominate.gfn = gfn;
+ op->u.nominate.u.gfn = gfn;
ret = do_domctl(xc_handle, &domctl);
- if(!ret) *handle = op->nominate.handle;
+ if(!ret) *handle = op->u.nominate.handle;
return ret;
}
domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF;
- op->nominate.grant_ref = gref;
+ op->u.nominate.u.grant_ref = gref;
ret = do_domctl(xc_handle, &domctl);
- if(!ret) *handle = op->nominate.handle;
+ if(!ret) *handle = op->u.nominate.handle;
return ret;
}
domctl.domain = 0;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_SHARE;
- op->share.source_handle = source_handle;
- op->share.client_handle = client_handle;
+ op->u.share.source_handle = source_handle;
+ op->u.share.client_handle = client_handle;
return do_domctl(xc_handle, &domctl);
}
domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN;
- op->debug.gfn = gfn;
+ op->u.debug.u.gfn = gfn;
return do_domctl(xc_handle, &domctl);
}
domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN;
- op->debug.mfn = mfn;
+ op->u.debug.u.mfn = mfn;
return do_domctl(xc_handle, &domctl);
}
domctl.domain = (domid_t)domid;
op = &(domctl.u.mem_sharing_op);
op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF;
- op->debug.gref = gref;
+ op->u.debug.u.gref = gref;
return do_domctl(xc_handle, &domctl);
}
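An illustrative caller of the wrappers above (a minimal sketch, not part of the change; the xc_memshr_* names and the integer handle returned by xc_interface_open() are assumed from the xc.h declarations of this era):

    #include <stdint.h>
    #include <xenctrl.h>

    /* Nominate one page in each domain and collapse them into a single
     * shared frame.  Wrapper names are assumed, not confirmed by this diff. */
    static int share_one_page(uint32_t sdom, unsigned long sgfn,
                              uint32_t cdom, unsigned long cgfn)
    {
        int xc = xc_interface_open();
        uint64_t sh, ch;
        int rc;

        if ( xc < 0 )
            return -1;

        rc = xc_memshr_control(xc, sdom, 1);              /* op->u.enable */
        if ( !rc ) rc = xc_memshr_control(xc, cdom, 1);
        if ( !rc ) rc = xc_memshr_nominate_gfn(xc, sdom, sgfn, &sh);
        if ( !rc ) rc = xc_memshr_nominate_gfn(xc, cdom, cgfn, &ch);
        if ( !rc ) rc = xc_memshr_share(xc, sh, ch);      /* op->u.share.* */

        xc_interface_close(xc);
        return rc;
    }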
{
DECLARE_SYSCTL;
int ret = 0;
- struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.get_para;
+ struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
bool has_num = user_para->cpu_num &&
user_para->freq_num &&
user_para->gov_num;
int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname)
{
DECLARE_SYSCTL;
- char *scaling_governor = sysctl.u.pm_op.set_gov.scaling_governor;
+ char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;
if ( (xc_handle < 0) || (!govname) )
return -EINVAL;
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
sysctl.u.pm_op.cpuid = cpuid;
- sysctl.u.pm_op.set_para.ctrl_type = ctrl_type;
- sysctl.u.pm_op.set_para.ctrl_value = ctrl_value;
+ sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
+ sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;
return xc_sysctl(xc_handle, &sysctl);
}
sysctl.u.pm_op.cpuid = cpuid;
ret = xc_sysctl(xc_handle, &sysctl);
- *avg_freq = sysctl.u.pm_op.get_avgfreq;
+ *avg_freq = sysctl.u.pm_op.u.get_avgfreq;
return ret;
}
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_cputopo;
sysctl.u.pm_op.cpuid = 0;
- set_xen_guest_handle( sysctl.u.pm_op.get_topo.cpu_to_core,
+ set_xen_guest_handle( sysctl.u.pm_op.u.get_topo.cpu_to_core,
info->cpu_to_core );
- set_xen_guest_handle( sysctl.u.pm_op.get_topo.cpu_to_socket,
+ set_xen_guest_handle( sysctl.u.pm_op.u.get_topo.cpu_to_socket,
info->cpu_to_socket );
- sysctl.u.pm_op.get_topo.max_cpus = info->max_cpus;
+ sysctl.u.pm_op.u.get_topo.max_cpus = info->max_cpus;
rc = do_sysctl(xc_handle, &sysctl);
- info->nr_cpus = sysctl.u.pm_op.get_topo.nr_cpus;
+ info->nr_cpus = sysctl.u.pm_op.u.get_topo.nr_cpus;
return rc;
}
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
sysctl.u.pm_op.cpuid = 0;
- sysctl.u.pm_op.set_sched_opt_smt = value;
+ sysctl.u.pm_op.u.set_sched_opt_smt = value;
rc = do_sysctl(xc_handle, &sysctl);
return rc;
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_vcpu_migration_delay;
sysctl.u.pm_op.cpuid = 0;
- sysctl.u.pm_op.set_vcpu_migration_delay = value;
+ sysctl.u.pm_op.u.set_vcpu_migration_delay = value;
rc = do_sysctl(xc_handle, &sysctl);
return rc;
rc = do_sysctl(xc_handle, &sysctl);
if (!rc && value)
- *value = sysctl.u.pm_op.get_vcpu_migration_delay;
+ *value = sysctl.u.pm_op.u.get_vcpu_migration_delay;
return rc;
}
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
sysctl.u.pm_op.cpuid = 0;
- sysctl.u.pm_op.get_max_cstate = 0;
+ sysctl.u.pm_op.u.get_max_cstate = 0;
rc = do_sysctl(xc_handle, &sysctl);
- *value = sysctl.u.pm_op.get_max_cstate;
+ *value = sysctl.u.pm_op.u.get_max_cstate;
return rc;
}
sysctl.cmd = XEN_SYSCTL_pm_op;
sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
sysctl.u.pm_op.cpuid = 0;
- sysctl.u.pm_op.set_max_cstate = value;
+ sysctl.u.pm_op.u.set_max_cstate = value;
return do_sysctl(xc_handle, &sysctl);
}
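An illustrative xenpm-style user of the pm_op wrappers (a minimal sketch, not part of the change; xc_set_cpufreq_para()'s prototype and the SCALING_SETSPEED constant from the public sysctl header are assumptions, xc_set_cpufreq_gov() is as declared above):

    #include <xenctrl.h>

    /* Switch CPU0 to the userspace governor and pin it to 1 GHz (kHz units).
     * Both requests land in sysctl.u.pm_op.u.{set_gov,set_para}. */
    static int pin_cpu0_freq(void)
    {
        int xc = xc_interface_open();
        int rc;

        if ( xc < 0 )
            return -1;

        rc = xc_set_cpufreq_gov(xc, 0, "userspace");
        if ( !rc )
            rc = xc_set_cpufreq_para(xc, 0, SCALING_SETSPEED, 1000000);

        xc_interface_close(xc);
        return rc;
    }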
rc = 0;
if(!hap_enabled(d))
return -EINVAL;
- d->arch.hvm_domain.mem_sharing_enabled = mec->enable;
+ d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
mem_sharing_audit();
return 0;
}
case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN:
{
- unsigned long gfn = mec->nominate.gfn;
+ unsigned long gfn = mec->u.nominate.u.gfn;
shr_handle_t handle;
if(!mem_sharing_enabled(d))
return -EINVAL;
rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
- mec->nominate.handle = handle;
+ mec->u.nominate.handle = handle;
mem_sharing_audit();
}
break;
case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF:
{
- grant_ref_t gref = mec->nominate.grant_ref;
+ grant_ref_t gref = mec->u.nominate.u.grant_ref;
unsigned long gfn;
shr_handle_t handle;
if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
return -EINVAL;
rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
- mec->nominate.handle = handle;
+ mec->u.nominate.handle = handle;
mem_sharing_audit();
}
break;
case XEN_DOMCTL_MEM_SHARING_OP_SHARE:
{
- shr_handle_t sh = mec->share.source_handle;
- shr_handle_t ch = mec->share.client_handle;
+ shr_handle_t sh = mec->u.share.source_handle;
+ shr_handle_t ch = mec->u.share.client_handle;
rc = mem_sharing_share_pages(sh, ch);
mem_sharing_audit();
}
case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN:
{
- unsigned long gfn = mec->debug.gfn;
+ unsigned long gfn = mec->u.debug.u.gfn;
rc = mem_sharing_debug_gfn(d, gfn);
mem_sharing_audit();
}
case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN:
{
- unsigned long mfn = mec->debug.mfn;
+ unsigned long mfn = mec->u.debug.u.mfn;
rc = mem_sharing_debug_mfn(mfn);
mem_sharing_audit();
}
case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF:
{
- grant_ref_t gref = mec->debug.gref;
+ grant_ref_t gref = mec->u.debug.u.gref;
rc = mem_sharing_debug_gref(d, gref);
mem_sharing_audit();
}
list_for_each(pos, &cpufreq_governor_list)
gov_num++;
- if ( (op->get_para.cpu_num != cpus_weight(policy->cpus)) ||
- (op->get_para.freq_num != pmpt->perf.state_count) ||
- (op->get_para.gov_num != gov_num) )
+ if ( (op->u.get_para.cpu_num != cpus_weight(policy->cpus)) ||
+ (op->u.get_para.freq_num != pmpt->perf.state_count) ||
+ (op->u.get_para.gov_num != gov_num) )
{
- op->get_para.cpu_num = cpus_weight(policy->cpus);
- op->get_para.freq_num = pmpt->perf.state_count;
- op->get_para.gov_num = gov_num;
+ op->u.get_para.cpu_num = cpus_weight(policy->cpus);
+ op->u.get_para.freq_num = pmpt->perf.state_count;
+ op->u.get_para.gov_num = gov_num;
return -EAGAIN;
}
- if ( !(affected_cpus = xmalloc_array(uint32_t, op->get_para.cpu_num)) )
+ if ( !(affected_cpus = xmalloc_array(uint32_t, op->u.get_para.cpu_num)) )
return -ENOMEM;
- memset(affected_cpus, 0, op->get_para.cpu_num * sizeof(uint32_t));
+ memset(affected_cpus, 0, op->u.get_para.cpu_num * sizeof(uint32_t));
for_each_cpu_mask(cpu, policy->cpus)
affected_cpus[j++] = cpu;
- ret = copy_to_guest(op->get_para.affected_cpus,
- affected_cpus, op->get_para.cpu_num);
+ ret = copy_to_guest(op->u.get_para.affected_cpus,
+ affected_cpus, op->u.get_para.cpu_num);
xfree(affected_cpus);
if ( ret )
return ret;
if ( !(scaling_available_frequencies =
- xmalloc_array(uint32_t, op->get_para.freq_num)) )
+ xmalloc_array(uint32_t, op->u.get_para.freq_num)) )
return -ENOMEM;
memset(scaling_available_frequencies, 0,
- op->get_para.freq_num * sizeof(uint32_t));
- for ( i = 0; i < op->get_para.freq_num; i++ )
+ op->u.get_para.freq_num * sizeof(uint32_t));
+ for ( i = 0; i < op->u.get_para.freq_num; i++ )
scaling_available_frequencies[i] =
pmpt->perf.states[i].core_frequency * 1000;
- ret = copy_to_guest(op->get_para.scaling_available_frequencies,
- scaling_available_frequencies, op->get_para.freq_num);
+ ret = copy_to_guest(op->u.get_para.scaling_available_frequencies,
+ scaling_available_frequencies, op->u.get_para.freq_num);
xfree(scaling_available_frequencies);
if ( ret )
return ret;
xfree(scaling_available_governors);
return ret;
}
- ret = copy_to_guest(op->get_para.scaling_available_governors,
+ ret = copy_to_guest(op->u.get_para.scaling_available_governors,
scaling_available_governors, gov_num * CPUFREQ_NAME_LEN);
xfree(scaling_available_governors);
if ( ret )
return ret;
- op->get_para.cpuinfo_cur_freq =
+ op->u.get_para.cpuinfo_cur_freq =
cpufreq_driver->get ? cpufreq_driver->get(op->cpuid) : policy->cur;
- op->get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
- op->get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
- op->get_para.scaling_cur_freq = policy->cur;
- op->get_para.scaling_max_freq = policy->max;
- op->get_para.scaling_min_freq = policy->min;
+ op->u.get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
+ op->u.get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
+ op->u.get_para.scaling_cur_freq = policy->cur;
+ op->u.get_para.scaling_max_freq = policy->max;
+ op->u.get_para.scaling_min_freq = policy->min;
if ( cpufreq_driver->name )
- strlcpy(op->get_para.scaling_driver,
+ strlcpy(op->u.get_para.scaling_driver,
cpufreq_driver->name, CPUFREQ_NAME_LEN);
else
- strlcpy(op->get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN);
+ strlcpy(op->u.get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN);
if ( policy->governor->name )
- strlcpy(op->get_para.scaling_governor,
+ strlcpy(op->u.get_para.scaling_governor,
policy->governor->name, CPUFREQ_NAME_LEN);
else
- strlcpy(op->get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN);
+ strlcpy(op->u.get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN);
/* governor specific para */
- if ( !strnicmp(op->get_para.scaling_governor,
+ if ( !strnicmp(op->u.get_para.scaling_governor,
"userspace", CPUFREQ_NAME_LEN) )
{
- op->get_para.u.userspace.scaling_setspeed = policy->cur;
+ op->u.get_para.u.userspace.scaling_setspeed = policy->cur;
}
- if ( !strnicmp(op->get_para.scaling_governor,
+ if ( !strnicmp(op->u.get_para.scaling_governor,
"ondemand", CPUFREQ_NAME_LEN) )
{
ret = get_cpufreq_ondemand_para(
- &op->get_para.u.ondemand.sampling_rate_max,
- &op->get_para.u.ondemand.sampling_rate_min,
- &op->get_para.u.ondemand.sampling_rate,
- &op->get_para.u.ondemand.up_threshold);
+ &op->u.get_para.u.ondemand.sampling_rate_max,
+ &op->u.get_para.u.ondemand.sampling_rate_min,
+ &op->u.get_para.u.ondemand.sampling_rate,
+ &op->u.get_para.u.ondemand.up_threshold);
}
return ret;
memcpy(&new_policy, old_policy, sizeof(struct cpufreq_policy));
- new_policy.governor = __find_governor(op->set_gov.scaling_governor);
+ new_policy.governor = __find_governor(op->u.set_gov.scaling_governor);
if (new_policy.governor == NULL)
return -EINVAL;
if ( !policy || !policy->governor )
return -EINVAL;
- switch(op->set_para.ctrl_type)
+ switch(op->u.set_para.ctrl_type)
{
case SCALING_MAX_FREQ:
{
struct cpufreq_policy new_policy;
memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
- new_policy.max = op->set_para.ctrl_value;
+ new_policy.max = op->u.set_para.ctrl_value;
ret = __cpufreq_set_policy(policy, &new_policy);
break;
struct cpufreq_policy new_policy;
memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
- new_policy.min = op->set_para.ctrl_value;
+ new_policy.min = op->u.set_para.ctrl_value;
ret = __cpufreq_set_policy(policy, &new_policy);
break;
case SCALING_SETSPEED:
{
- unsigned int freq =op->set_para.ctrl_value;
+ unsigned int freq = op->u.set_para.ctrl_value;
if ( !strnicmp(policy->governor->name,
"userspace", CPUFREQ_NAME_LEN) )
case SAMPLING_RATE:
{
- unsigned int sampling_rate = op->set_para.ctrl_value;
+ unsigned int sampling_rate = op->u.set_para.ctrl_value;
if ( !strnicmp(policy->governor->name,
"ondemand", CPUFREQ_NAME_LEN) )
case UP_THRESHOLD:
{
- unsigned int up_threshold = op->set_para.ctrl_value;
+ unsigned int up_threshold = op->u.set_para.ctrl_value;
if ( !strnicmp(policy->governor->name,
"ondemand", CPUFREQ_NAME_LEN) )
if ( !op || !cpu_online(op->cpuid) )
return -EINVAL;
- op->get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);
+ op->u.get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);
return 0;
}
XEN_GUEST_HANDLE_64(uint32) cpu_to_socket_arr;
int arr_size, ret=0;
- cpu_to_core_arr = op->get_topo.cpu_to_core;
- cpu_to_socket_arr = op->get_topo.cpu_to_socket;
- arr_size= min_t(uint32_t, op->get_topo.max_cpus, NR_CPUS);
+ cpu_to_core_arr = op->u.get_topo.cpu_to_core;
+ cpu_to_socket_arr = op->u.get_topo.cpu_to_socket;
+ arr_size = min_t(uint32_t, op->u.get_topo.max_cpus, NR_CPUS);
if ( guest_handle_is_null( cpu_to_core_arr ) ||
guest_handle_is_null( cpu_to_socket_arr) )
}
}
- op->get_topo.nr_cpus = nr_cpus + 1;
+ op->u.get_topo.nr_cpus = nr_cpus + 1;
out:
return ret;
}
uint32_t saved_value;
saved_value = sched_smt_power_savings;
- sched_smt_power_savings = !!op->set_sched_opt_smt;
- op->set_sched_opt_smt = saved_value;
+ sched_smt_power_savings = !!op->u.set_sched_opt_smt;
+ op->u.set_sched_opt_smt = saved_value;
break;
}
case XEN_SYSCTL_pm_op_set_vcpu_migration_delay:
{
- set_vcpu_migration_delay(op->set_vcpu_migration_delay);
+ set_vcpu_migration_delay(op->u.set_vcpu_migration_delay);
break;
}
case XEN_SYSCTL_pm_op_get_vcpu_migration_delay:
{
- op->get_vcpu_migration_delay = get_vcpu_migration_delay();
+ op->u.get_vcpu_migration_delay = get_vcpu_migration_delay();
break;
}
case XEN_SYSCTL_pm_op_get_max_cstate:
{
- op->get_max_cstate = acpi_get_cstate_limit();
+ op->u.get_max_cstate = acpi_get_cstate_limit();
break;
}
case XEN_SYSCTL_pm_op_set_max_cstate:
{
- acpi_set_cstate_limit(op->set_max_cstate);
+ acpi_set_cstate_limit(op->u.set_max_cstate);
break;
}
#include "xen.h"
#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
struct xenctl_cpumap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
- unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
+ uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
};
typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
# define XEN_DOMCTL_set_cpuid 49
struct xen_domctl_cpuid {
- unsigned int input[2];
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
+ uint32_t input[2];
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
};
typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
uint32_t gtsc_khz;
uint32_t incarnation;
uint32_t pad;
- uint64_t elapsed_nsec;
+ uint64_aligned_t elapsed_nsec;
};
typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
#define XEN_DOMCTL_gdbsx_guestmemio 1000 /* guest mem io */
struct xen_domctl_gdbsx_memio {
+ /* IN */
uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */
uint64_aligned_t gva; /* guest virtual address */
uint64_aligned_t uva; /* user buffer virtual address */
- int len; /* number of bytes to read/write */
- int gwr; /* 0 = read from guest. 1 = write to guest */
- int remain; /* bytes remaining to be copied */
+ uint32_t len; /* number of bytes to read/write */
+ uint8_t gwr; /* 0 = read from guest. 1 = write to guest */
+ /* OUT */
+ uint32_t remain; /* bytes remaining to be copied */
};
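The memio interface above is driven directly through a domctl; a minimal libxc-style sketch of a guest read follows (the gdbsx_guest_memio union member name is assumed, and DECLARE_DOMCTL/do_domctl are the internal helpers already used by the wrappers in this change):

    /* Read 'len' bytes at guest virtual address 'gva' into 'buf'. */
    static int gdbsx_read_mem(int xc_handle, uint32_t domid,
                              uint64_t gva, void *buf, uint32_t len)
    {
        DECLARE_DOMCTL;
        int ret;

        domctl.cmd = XEN_DOMCTL_gdbsx_guestmemio;
        domctl.domain = (domid_t)domid;
        domctl.u.gdbsx_guest_memio.pgd3val = 0;
        domctl.u.gdbsx_guest_memio.gva = gva;
        domctl.u.gdbsx_guest_memio.uva = (uint64_t)(uintptr_t)buf;
        domctl.u.gdbsx_guest_memio.len = len;
        domctl.u.gdbsx_guest_memio.gwr = 0;            /* read from guest */

        ret = do_domctl(xc_handle, &domctl);
        /* On return, .remain reports any bytes that could not be copied. */
        return ret ? ret : (int)domctl.u.gdbsx_guest_memio.remain;
    }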
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_domstatus 1003
struct xen_domctl_gdbsx_domstatus {
- int paused; /* is the domain paused */
+ /* OUT */
+ uint8_t paused; /* is the domain paused */
uint32_t vcpu_id; /* any vcpu in an event? */
uint32_t vcpu_ev; /* if yes, what event? */
-
};
/*
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */
/* OP_ENABLE */
- unsigned long shared_addr; /* IN: Virtual address of shared page */
- unsigned long ring_addr; /* IN: Virtual address of ring page */
+ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */
+ uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */
/* Other OPs */
- unsigned long gfn; /* IN: gfn of page being operated on */
+ uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
#define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9)
struct xen_domctl_mem_sharing_op {
- uint8_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */
+ uint8_t op;                    /* XEN_DOMCTL_MEM_SHARING_OP_* */
union {
- int enable; /* for OP_CONTROL */
+ uint8_t enable; /* OP_CONTROL */
- struct mem_sharing_op_nominate { /* for OP_NOMINATE */
+ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
union {
- unsigned long gfn; /* IN: gfn to nominate */
+ uint64_aligned_t gfn; /* IN: gfn to nominate */
uint32_t grant_ref; /* IN: grant ref to nominate */
- };
- uint64_t handle; /* OUT: the handle */
+ } u;
+ uint64_aligned_t handle; /* OUT: the handle */
} nominate;
- struct mem_sharing_op_share {
- uint64_t source_handle; /* IN: handle to the source page */
- uint64_t client_handle; /* IN: handle to the client page */
+ struct mem_sharing_op_share { /* OP_SHARE */
+ uint64_aligned_t source_handle; /* IN: handle to the source page */
+ uint64_aligned_t client_handle; /* IN: handle to the client page */
} share;
- struct mem_sharing_op_debug {
+ struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
union {
- unsigned long gfn; /* IN: gfn to debug */
- unsigned long mfn; /* IN: mfn to debug */
+ uint64_aligned_t gfn; /* IN: gfn to debug */
+ uint64_aligned_t mfn; /* IN: mfn to debug */
grant_ref_t gref; /* IN: gref to debug */
- };
+ } u;
} debug;
- };
+ } u;
};
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
uint32_t set_max_cstate;
uint32_t get_vcpu_migration_delay;
uint32_t set_vcpu_migration_delay;
- };
+ } u;
};
#define XEN_SYSCTL_page_offline_op 14