The trailing _domain suffix is redundant and only adds to code volume. Drop it.
Reflow lines as appropriate, and switch to the new XFREE/etc wrappers
where applicable (see the sketch below).
No functional change.
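
For reference, the wrapper semantics relied on here are roughly as
follows (a sketch, not the verbatim Xen definitions): each wrapper
frees the object and NULLs the pointer in a single statement, which is
why the separate "... = NULL;" assignments disappear in the diff.

  /* Free an xmalloc()'d object and clear the pointer. */
  #define XFREE(p) do {                 \
      xfree(p);                         \
      (p) = NULL;                       \
  } while ( false )

  /* Free a single xenheap page and clear the pointer. */
  #define FREE_XENHEAP_PAGE(p) do {     \
      free_xenheap_page(p);             \
      (p) = NULL;                       \
  } while ( false )
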
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
- ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
uint64_t val = masks->_1cd;
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
const struct cpuidmasks *masks =
- (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
- ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+ ? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
if (msr_basic) {
uint64_t val = masks->_1cd;
if ( d != current->domain && !VM_ASSIST(d, m2p_strict) &&
is_pv_domain(d) && !is_pv_32bit_domain(d) &&
test_bit(VMASST_TYPE_m2p_strict, &c.nat->vm_assist) &&
- atomic_read(&d->arch.pv_domain.nr_l4_pages) )
+ atomic_read(&d->arch.pv.nr_l4_pages) )
{
bool done = false;
if ( !v || !is_pv_vcpu(v) )
return mfn_to_virt(mfn_x(mfn));
- dcache = &v->domain->arch.pv_domain.mapcache;
+ dcache = &v->domain->arch.pv.mapcache;
vcache = &v->arch.pv_vcpu.mapcache;
if ( !dcache->inuse )
return mfn_to_virt(mfn_x(mfn));
v = mapcache_current_vcpu();
ASSERT(v && is_pv_vcpu(v));
- dcache = &v->domain->arch.pv_domain.mapcache;
+ dcache = &v->domain->arch.pv.mapcache;
ASSERT(dcache->inuse);
idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
int mapcache_domain_init(struct domain *d)
{
- struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
+ struct mapcache_domain *dcache = &d->arch.pv.mapcache;
unsigned int bitmap_pages;
ASSERT(is_pv_domain(d));
int mapcache_vcpu_init(struct vcpu *v)
{
struct domain *d = v->domain;
- struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
+ struct mapcache_domain *dcache = &d->arch.pv.mapcache;
unsigned long i;
unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
break;
}
- d->arch.pv_domain.cpuidmasks->_1cd = mask;
+ d->arch.pv.cpuidmasks->_1cd = mask;
}
break;
if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
mask &= (~0ULL << 32) | ctl->ecx;
- d->arch.pv_domain.cpuidmasks->_6c = mask;
+ d->arch.pv.cpuidmasks->_6c = mask;
}
break;
if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
mask &= ((uint64_t)eax << 32) | ebx;
- d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
+ d->arch.pv.cpuidmasks->_7ab0 = mask;
}
/*
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
mask &= (~0ULL << 32) | eax;
- d->arch.pv_domain.cpuidmasks->Da1 = mask;
+ d->arch.pv.cpuidmasks->Da1 = mask;
}
break;
break;
}
- d->arch.pv_domain.cpuidmasks->e1cd = mask;
+ d->arch.pv.cpuidmasks->e1cd = mask;
}
break;
struct domain *d = v->domain;
v->arch.cr3 = mfn_x(mfn) << PAGE_SHIFT;
- if ( is_pv_domain(d) && d->arch.pv_domain.pcid )
+ if ( is_pv_domain(d) && d->arch.pv.pcid )
v->arch.cr3 |= get_pcid_bits(v, false);
}
cr4 |= mmu_cr4_features & (X86_CR4_PSE | X86_CR4_SMEP | X86_CR4_SMAP |
X86_CR4_OSXSAVE | X86_CR4_FSGSBASE);
- if ( d->arch.pv_domain.pcid )
+ if ( d->arch.pv.pcid )
cr4 |= X86_CR4_PCIDE;
- else if ( !d->arch.pv_domain.xpti )
+ else if ( !d->arch.pv.xpti )
cr4 |= X86_CR4_PGE;
cr4 |= d->arch.vtsc ? X86_CR4_TSD : 0;
? pv_guest_cr4_to_real_cr4(v)
: ((read_cr4() & ~(X86_CR4_PCIDE | X86_CR4_TSD)) | X86_CR4_PGE);
- if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
+ if ( is_pv_vcpu(v) && v->domain->arch.pv.xpti )
{
cpu_info->root_pgt_changed = true;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
{
init_xen_l4_slots(pl4e, _mfn(pfn),
d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
- atomic_inc(&d->arch.pv_domain.nr_l4_pages);
+ atomic_inc(&d->arch.pv.nr_l4_pages);
}
unmap_domain_page(pl4e);
if ( rc >= 0 )
{
- atomic_dec(&d->arch.pv_domain.nr_l4_pages);
+ atomic_dec(&d->arch.pv.nr_l4_pages);
rc = 0;
}
break;
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- if ( !rc && pt_owner->arch.pv_domain.xpti )
+ if ( !rc && pt_owner->arch.pv.xpti )
{
bool local_in_use = false;
if ( compat32 )
{
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
- d->arch.pv_domain.xpti = false;
- d->arch.pv_domain.pcid = false;
+ d->arch.pv.xpti = false;
+ d->arch.pv.pcid = false;
v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
if ( setup_compat_arg_xlat(v) != 0 )
BUG();
d->arch.x87_fip_width = 4;
- d->arch.pv_domain.xpti = false;
- d->arch.pv_domain.pcid = false;
+ d->arch.pv.xpti = false;
+ d->arch.pv.pcid = false;
return 0;
{
return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
1U << GDT_LDT_VCPU_SHIFT,
- v->domain->arch.pv_domain.gdt_ldt_l1tab,
+ v->domain->arch.pv.gdt_ldt_l1tab,
NULL);
}
destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
- xfree(d->arch.pv_domain.cpuidmasks);
- d->arch.pv_domain.cpuidmasks = NULL;
+ XFREE(d->arch.pv.cpuidmasks);
- free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
- d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+ FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
}
pv_l1tf_domain_init(d);
- d->arch.pv_domain.gdt_ldt_l1tab =
+ d->arch.pv.gdt_ldt_l1tab =
alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
- if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+ if ( !d->arch.pv.gdt_ldt_l1tab )
goto fail;
- clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ clear_page(d->arch.pv.gdt_ldt_l1tab);
if ( levelling_caps & ~LCAP_faulting &&
- (d->arch.pv_domain.cpuidmasks = xmemdup(&cpuidmask_defaults)) == NULL )
+ (d->arch.pv.cpuidmasks = xmemdup(&cpuidmask_defaults)) == NULL )
goto fail;
rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- d->arch.pv_domain.xpti = opt_xpti & (is_hardware_domain(d)
- ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
+ d->arch.pv.xpti = opt_xpti & (is_hardware_domain(d)
+ ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
if ( !is_pv_32bit_domain(d) && use_invpcid && cpu_has_pcid )
switch ( opt_pcid )
break;
case PCID_ALL:
- d->arch.pv_domain.pcid = true;
+ d->arch.pv.pcid = true;
break;
case PCID_XPTI:
- d->arch.pv_domain.pcid = d->arch.pv_domain.xpti;
+ d->arch.pv.pcid = d->arch.pv.xpti;
break;
case PCID_NOXPTI:
- d->arch.pv_domain.pcid = !d->arch.pv_domain.xpti;
+ d->arch.pv.pcid = !d->arch.pv.xpti;
break;
default:
v->arch.flags ^= TF_kernel_mode;
update_cr3(v);
- if ( d->arch.pv_domain.xpti )
+ if ( d->arch.pv.xpti )
{
struct cpu_info *cpu_info = get_cpu_info();
cpu_info->root_pgt_changed = true;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt)) |
- (d->arch.pv_domain.pcid
- ? get_pcid_bits(v, true) : 0);
+ (d->arch.pv.pcid ? get_pcid_bits(v, true) : 0);
}
/* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
struct list_head pdev_list;
union {
- struct pv_domain pv_domain;
+ struct pv_domain pv;
struct hvm_domain hvm_domain;
};
#define gdt_ldt_pt_idx(v) \
((v)->vcpu_id >> (PAGETABLE_ORDER - GDT_LDT_VCPU_SHIFT))
#define pv_gdt_ptes(v) \
- ((v)->domain->arch.pv_domain.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
+ ((v)->domain->arch.pv.gdt_ldt_l1tab[gdt_ldt_pt_idx(v)] + \
(((v)->vcpu_id << GDT_LDT_VCPU_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)))
#define pv_ldt_ptes(v) (pv_gdt_ptes(v) + 16)
#define flush_root_pgtbl_domain(d) \
{ \
- if ( is_pv_domain(d) && (d)->arch.pv_domain.xpti ) \
+ if ( is_pv_domain(d) && (d)->arch.pv.xpti ) \
flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL); \
}
ASSERT(is_pv_domain(d));
ASSERT(!(pte & _PAGE_PRESENT));
- if ( d->arch.pv_domain.check_l1tf && !paging_mode_sh_forced(d) &&
+ if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) &&
(((level > 1) && (pte & _PAGE_PSE)) || !is_l1tf_safe_maddr(pte)) )
{
#ifdef CONFIG_SHADOW_PAGING
static inline void pv_l1tf_domain_init(struct domain *d)
{
- d->arch.pv_domain.check_l1tf =
+ d->arch.pv.check_l1tf =
opt_pv_l1tf & (is_hardware_domain(d)
? OPT_PV_L1TF_DOM0 : OPT_PV_L1TF_DOMU);