... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
In a few cases this includes a further is_pv_32bit_vcpu() ->
is_pv_32bit_domain() conversion, in places where the domain pointer
(currd) is already in scope.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
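
For reference, the header hunk at the end of this patch makes the
identity visible by plain macro expansion. The following minimal,
self-contained sketch (with stand-in struct layouts, not Xen's real
ones) shows that both predicates read the same is_32bit_pv flag; it
is illustrative only and not part of the patch:

    #include <assert.h>
    #include <stdbool.h>

    /* Stand-ins for the relevant slice of Xen's structures. */
    struct arch_domain { bool is_32bit_pv; };
    struct domain { struct arch_domain arch; };
    struct vcpu { struct domain *domain; };

    /* The definitions as they appear in the hunk below. */
    #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
    #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
    #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
    /* The macro this patch removes: */
    #define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))

    int main(void)
    {
        struct domain d = { .arch.is_32bit_pv = true };
        struct vcpu v = { .domain = &d };

        /* Both expand to ((v)->domain->arch.is_32bit_pv). */
        assert(is_pv_32on64_vcpu(&v) == is_pv_32bit_vcpu(&v));
        return 0;
    }
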
mctelem_cookie_t cookie = ID2COOKIE(mc_fetch.nat->fetch_id);
mctelem_ack(which, cookie);
} else {
- if (!is_pv_32on64_vcpu(v)
+ if (!is_pv_32bit_vcpu(v)
? guest_handle_is_null(mc_fetch.nat->data)
: compat_handle_is_null(mc_fetch.cmp->data))
return x86_mcerr("do_mca fetch: guest buffer "
if ((mctc = mctelem_consume_oldest_begin(which))) {
struct mc_info *mcip = mctelem_dataptr(mctc);
- if (!is_pv_32on64_vcpu(v)
+ if (!is_pv_32bit_vcpu(v)
? copy_to_guest(mc_fetch.nat->data, mcip, 1)
: copy_to_compat(mc_fetch.cmp->data,
mcip, 1)) {
mc_physcpuinfo.nat = &op->u.mc_physcpuinfo;
nlcpu = num_online_cpus();
- if (!is_pv_32on64_vcpu(v)
+ if (!is_pv_32bit_vcpu(v)
? !guest_handle_is_null(mc_physcpuinfo.nat->info)
: !compat_handle_is_null(mc_physcpuinfo.cmp->info)) {
if (mc_physcpuinfo.nat->ncpus <= 0)
if (log_cpus == NULL)
return x86_mcerr("do_mca cpuinfo", -ENOMEM);
on_each_cpu(do_mc_get_cpu_info, log_cpus, 1);
- if (!is_pv_32on64_vcpu(v)
+ if (!is_pv_32bit_vcpu(v)
? copy_to_guest(mc_physcpuinfo.nat->info,
log_cpus, nlcpu)
: copy_to_compat(mc_physcpuinfo.cmp->info,
void vcpu_destroy(struct vcpu *v)
{
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_vcpu(v) )
{
free_compat_arg_xlat(v);
release_compat_l4(v);
curr->arch.hvm_vcpu.hcall_preempted = 1;
if ( is_pv_vcpu(curr) ?
- !is_pv_32on64_vcpu(curr) :
+ !is_pv_32bit_vcpu(curr) :
curr->arch.hvm_vcpu.hcall_64bit )
{
for ( i = 0; *p != '\0'; i++ )
if ( rc )
return rc;
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_vcpu(v) )
{
l4tab = map_domain_page(mfn);
mfn = l4e_get_pfn(*l4tab);
struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long args[6];
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
args[0] = regs->ebx;
args[1] = regs->ecx;
void __trace_pv_trap(int trapnr, unsigned long eip,
int use_error_code, unsigned error_code)
{
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
struct __packed {
unsigned eip:32,
{
unsigned long eip = guest_cpu_user_regs()->eip;
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
struct __packed {
u32 eip, addr, error_code;
void __trace_trap_one_addr(unsigned event, unsigned long va)
{
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
u32 d = va;
__trace_var(event, 1, sizeof(d), &d);
void __trace_trap_two_addr(unsigned event, unsigned long va1,
unsigned long va2)
{
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
struct __packed {
u32 va1, va2;
* cases, "unsigned long" is the size of a guest virtual address.
*/
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_pv_32bit_vcpu(current) )
{
struct __packed {
l1_pgentry_t pte;
if ( is_hvm_vcpu(v) )
return;
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_vcpu(v) )
{
compat_show_guest_stack(v, regs, debug_stack_lines);
return;
{
unsigned long mfn;
- if ( !is_pv_32on64_vcpu(v) )
+ if ( !is_pv_32bit_domain(currd) )
{
mfn = pagetable_get_pfn(v->arch.guest_table);
*reg = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
unsigned long gfn;
struct page_info *page;
- gfn = !is_pv_32on64_vcpu(v)
+ gfn = !is_pv_32bit_domain(currd)
? xen_cr3_to_pfn(*reg) : compat_cr3_to_pfn(*reg);
page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( page )
switch ( regs->_ecx )
{
case MSR_FS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
wrfsbase(msr_content);
v->arch.pv_vcpu.fs_base = msr_content;
break;
case MSR_GS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
wrgsbase(msr_content);
v->arch.pv_vcpu.gs_base_kernel = msr_content;
break;
case MSR_SHADOW_GS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
goto fail;
switch ( regs->_ecx )
{
case MSR_FS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
val = cpu_has_fsgsbase ? __rdfsbase() : v->arch.pv_vcpu.fs_base;
goto rdmsr_writeback;
case MSR_GS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
val = cpu_has_fsgsbase ? __rdgsbase()
: v->arch.pv_vcpu.gs_base_kernel;
goto rdmsr_writeback;
case MSR_SHADOW_GS_BASE:
- if ( is_pv_32on64_vcpu(v) )
+ if ( is_pv_32bit_domain(currd) )
goto fail;
val = v->arch.pv_vcpu.gs_base_user;
goto rdmsr_writeback;
return;
}
}
- else if ( is_pv_32on64_vcpu(v) && regs->error_code )
+ else if ( is_pv_32bit_vcpu(v) && regs->error_code )
{
emulate_gate_op(regs);
return;
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
-#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
        d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
}
else
{
- desc = (!is_pv_32on64_vcpu(v)
+ desc = (!is_pv_32bit_vcpu(v)
? this_cpu(gdt_table) : this_cpu(compat_gdt_table))
+ LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY;
_set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt);
#ifdef CONFIG_COMPAT
if ( has_hvm_container_vcpu(current) ?
hvm_guest_x86_mode(current) != 8 :
- is_pv_32on64_vcpu(current) )
+ is_pv_32bit_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;