return 0;
}
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
+int vmx_read_guest_msr(u32 msr, u64 *val)
{
- unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
- const struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+ struct vcpu *curr = current;
+ unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+ const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
for ( i = 0; i < msr_count; i++ )
{
return -ESRCH;
}
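
For orientation, the read helper ends up as below. The declarations come from the hunk above; the loop body is elided by the diff context, so the match-and-copy step is an assumption inferred from the -ESRCH fallthrough and the write-side counterpart, not quoted from this patch.

    int vmx_read_guest_msr(u32 msr, u64 *val)
    {
        struct vcpu *curr = current;
        unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
        const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

        for ( i = 0; i < msr_count; i++ )
        {
            if ( msr_area[i].index == msr )
            {
                *val = msr_area[i].data; /* assumed: copy out the saved value */
                return 0;
            }
        }

        return -ESRCH; /* MSR not tracked in this vcpu's save/load area */
    }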
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
+int vmx_write_guest_msr(u32 msr, u64 val)
{
- unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+ struct vcpu *curr = current;
+ unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+ struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
for ( i = 0; i < msr_count; i++ )
{
return -ESRCH;
}
-int vmx_add_guest_msr(struct vcpu *v, u32 msr)
+int vmx_add_guest_msr(u32 msr)
{
- unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
-
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
- return 0;
-
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
- return -ENOSPC;
+ struct vcpu *curr = current;
+ unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+ struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
if ( msr_area == NULL )
{
if ( (msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- v->arch.hvm_vmx.msr_area = msr_area;
+ curr->arch.hvm_vmx.msr_area = msr_area;
__vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
}
+ for ( i = 0; i < msr_count; i++ )
+ if ( msr_area[i].index == msr )
+ return 0;
+
+ if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+ return -ENOSPC;
+
msr_area[msr_count].index = msr;
msr_area[msr_count].mbz = 0;
msr_area[msr_count].data = 0;
- v->arch.hvm_vmx.msr_count = ++msr_count;
+ curr->arch.hvm_vmx.msr_count = ++msr_count;
__vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
__vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
return 0;
}
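
Two things worth noting in this hunk. First, the page allocation is hoisted above the duplicate scan; behaviour is unchanged (msr_count is 0 while msr_area is NULL), but the scan now provably never runs against an unallocated area. Second, the fields touched (index, mbz, data) follow the architectural VMX MSR save/load entry format; the sketch below is for orientation only and is not part of the patch:

    /* Assumed entry layout, consistent with the VMX MSR-area format
     * (u32/u64 are Xen's fixed-width integer types): 16 bytes per entry,
     * so PAGE_SIZE / sizeof(struct vmx_msr_entry) == 256 slots on 4K
     * pages -- the -ENOSPC bound checked above. */
    struct vmx_msr_entry {
        u32 index; /* MSR number */
        u32 mbz;   /* high half of the first qword: must be zero */
        u64 data;  /* value stored on VM exit / loaded on VM entry */
    };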
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
+int vmx_add_host_load_msr(u32 msr)
{
- unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
- struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;
-
- for ( i = 0; i < msr_count; i++ )
- if ( msr_area[i].index == msr )
- return 0;
-
- if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
- return -ENOSPC;
+ struct vcpu *curr = current;
+ unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
+ struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
if ( msr_area == NULL )
{
if ( (msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- v->arch.hvm_vmx.host_msr_area = msr_area;
+ curr->arch.hvm_vmx.host_msr_area = msr_area;
__vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
}
+ for ( i = 0; i < msr_count; i++ )
+ if ( msr_area[i].index == msr )
+ return 0;
+
+ if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+ return -ENOSPC;
+
msr_area[msr_count].index = msr;
msr_area[msr_count].mbz = 0;
rdmsrl(msr, msr_area[msr_count].data);
- v->arch.hvm_vmx.host_msr_count = ++msr_count;
+ curr->arch.hvm_vmx.host_msr_count = ++msr_count;
__vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
return 0;
}
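
The host-load variant above differs from the guest variant in two ways: the guest page is wired up as both the exit-store and entry-load area, while the host page is an exit-load area only, and the host entry is seeded from the live MSR rather than zeroed:

    /* Guest area (earlier hunk): one page serves both directions.
     *     __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
     *     __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
     * Host area (this hunk): exit-load only, seeded from hardware.
     *     __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
     *     rdmsrl(msr, msr_area[msr_count].data);
     */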
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
int rc;
- if ( arch_vmx->vmcs == NULL )
- {
- if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
- return -ENOMEM;
- INIT_LIST_HEAD(&arch_vmx->active_list);
- __vmpclear(virt_to_maddr(arch_vmx->vmcs));
- arch_vmx->active_cpu = -1;
- arch_vmx->launched = 0;
- }
+ if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
+ return -ENOMEM;
+ INIT_LIST_HEAD(&arch_vmx->active_list);
+ __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+ arch_vmx->active_cpu = -1;
+ arch_vmx->launched = 0;
if ( (rc = construct_vmcs(v)) != 0 )
{
vmx_free_vmcs(arch_vmx->vmcs);
- arch_vmx->vmcs = NULL;
return rc;
}
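
This hunk and the next interact: the VMCS is now allocated unconditionally at create time (the lazy NULL check is gone), and the failure path above frees the VMCS without clearing the pointer. The next hunk, opening at the bare brace below (presumably vmx_destroy_vmcs(), given the vmx_clear_vmcs()/vmx_free_vmcs() calls), likewise drops its NULL guard. This presumably relies on the caller treating a failed create as fatal, so the dangling arch_vmx->vmcs is never dereferenced; a hypothetical caller shape (names assumed, not from this patch):

    /* Sketch: on failure, tear the vcpu down without entering the
     * destroy path, since arch_vmx->vmcs is left dangling above. */
    if ( (rc = vmx_create_vmcs(v)) != 0 )
        return rc; /* no VMCS teardown on this path */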
{
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
- if ( arch_vmx->vmcs == NULL )
- return;
-
vmx_clear_vmcs(v);
vmx_free_vmcs(arch_vmx->vmcs);
- arch_vmx->vmcs = NULL;
+
+ free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
+ free_xenheap_page(v->arch.hvm_vmx.msr_area);
+ free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
}
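
The three frees above run unconditionally, including for vcpus that never registered an MSR or allocated a bitmap. That is safe on the assumption that the xenheap free path treats NULL as a no-op, roughly:

    /* Assumed allocator behaviour (sketch, not from this patch): */
    void free_xenheap_pages(void *v, unsigned int order)
    {
        if ( v == NULL )
            return; /* freeing a never-allocated area is harmless */
        /* ... return the page(s) to the xen heap ... */
    }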
void vm_launch_fail(void)
goto done;
}
- if ( vmx_read_guest_msr(v, ecx, &msr_content) == 0 )
+ if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
break;
if ( is_last_branch_msr(ecx) )
for ( ; (rc == 0) && lbr->count; lbr++ )
for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
- if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
+ if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
vmx_disable_intercept_for_msr(v, lbr->base + i);
}
if ( (rc < 0) ||
- (vmx_add_host_load_msr(v, ecx) < 0) )
+ (vmx_add_host_load_msr(ecx) < 0) )
vmx_inject_hw_exception(v, TRAP_machine_check, 0);
else
{
switch ( long_mode_do_msr_write(regs) )
{
case HNDL_unhandled:
- if ( (vmx_write_guest_msr(v, ecx, msr_content) != 0) &&
+ if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
!is_last_branch_msr(ecx) )
wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
break;
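
The WRMSR fallback order in this hunk: the long-mode handler gets first refusal; an unhandled MSR is then offered to the current vcpu's save/load area; only if it is not tracked there and is not an LBR MSR does it reach the hypervisor-registers path. Annotated restatement (illustration, not new code):

    case HNDL_unhandled:
        if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&  /* not in area */
             !is_last_branch_msr(ecx) )                       /* not an LBR MSR */
            wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx); /* last resort */
        break;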
return 0;
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
return 0;
- if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
return 0;
- vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
+ vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
(core2_get_pmc_count()-1)*sizeof(char));
break;
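
Ordering matters in the MSR_CORE_PERF_GLOBAL_CTRL setup above: vmx_add_host_load_msr() snapshots the current hardware value with rdmsrl() at registration time, so the preceding wrmsrl(..., 0) guarantees every VM exit reloads zero, while the guest slot is created zeroed and then forced to all-ones for VM entry. Annotated restatement:

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); /* host value becomes 0 */
    /* snapshots the now-zero host value; reloaded on every VM exit */
    if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
        return 0;
    /* guest slot is created zeroed by vmx_add_guest_msr() ... */
    if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
        return 0;
    /* ... then set to all-ones: every counter enabled at VM entry */
    vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);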
case MSR_CORE_PERF_FIXED_CTR_CTRL:
non_global_ctrl = msr_content;
- vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
global_ctrl >>= 32;
for ( i = 0; i < 3; i++ )
{
break;
default:
tmp = ecx - MSR_P6_EVNTSEL0;
- vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
if ( tmp >= 0 && tmp < core2_get_pmc_count() )
core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
(global_ctrl >> tmp) & (msr_content >> 22) & 1;
if ( type != MSR_TYPE_GLOBAL )
wrmsrl(ecx, msr_content);
else
- vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
return 1;
}
msr_content = core2_vpmu_cxt->global_ovf_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
- vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
break;
default:
rdmsrl(regs->ecx, msr_content);