hypercall.op = __HYPERVISOR_xsm_op;
hypercall.arg[0] = (unsigned long)&acmctl;
- if ( lock_pages(&acmctl, sizeof(acmctl)) != 0)
+ if ( lock_pages(xch, &acmctl, sizeof(acmctl)) != 0)
{
PERROR("Could not lock memory for Xen hypercall");
return -EFAULT;
DPRINTF("acmctl operation failed -- need to"
" rebuild the user-space tool set?\n");
}
- unlock_pages(&acmctl, sizeof(acmctl));
+ unlock_pages(xch, &acmctl, sizeof(acmctl));
switch (cmd) {
case ACMOP_getdecision: {
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
break;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, sizeof (local));
if ( err < 0 )
break;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
- if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
return err;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, sizeof (local));
if (err < 0)
return err;
arg.domain_id = domid;
arg.reason = reason;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(xch, &arg, sizeof(arg)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
out1:
return ret;
domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
- if ( lock_pages(local, cpusize) != 0 )
+ if ( lock_pages(xch, local, cpusize) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
ret = do_domctl(xch, &domctl);
- unlock_pages(local, cpusize);
+ unlock_pages(xch, local, cpusize);
out:
free(local);
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
- if ( lock_pages(local, sizeof(local)) != 0 )
+ if ( lock_pages(xch, local, cpusize) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
ret = do_domctl(xch, &domctl);
- unlock_pages(local, sizeof (local));
+ unlock_pages(xch, local, cpusize);
bitmap_byte_to_64(cpumap, local, cpusize * 8);
out:
free(local);
int ret = 0;
DECLARE_SYSCTL;
- if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
+ if ( lock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
return -1;
sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
else
ret = sysctl.u.getdomaininfolist.num_domains;
- unlock_pages(info, max_domains*sizeof(xc_domaininfo_t));
+ unlock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t));
return ret;
}
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
if ( ctxt_buf )
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
if ( ctxt_buf )
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
}
domctl.u.hvmcontext_partial.instance = instance;
set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
if ( ctxt_buf )
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return ret ? -1 : 0;
}
domctl.u.hvmcontext.size = size;
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
return ret;
ret = do_domctl(xch, &domctl);
- unlock_pages(ctxt_buf, size);
+ unlock_pages(xch, ctxt_buf, size);
return ret;
}
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
- if ( (rc = lock_pages(ctxt, sz)) != 0 )
+ if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
- unlock_pages(ctxt, sz);
+ unlock_pages(xch, ctxt, sz);
return rc;
}
arg.id = id;
arg.timeout = timeout;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(xch, &arg, sizeof(arg)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
out1:
return ret;
set_xen_guest_handle(fmap.map.buffer, &e820);
- if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) )
+ if ( lock_pages(xch, &fmap, sizeof(fmap)) || lock_pages(xch, &e820, sizeof(e820)) )
{
PERROR("Could not lock memory for Xen hypercall");
rc = -1;
rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap);
out:
- unlock_pages(&fmap, sizeof(fmap));
- unlock_pages(&e820, sizeof(e820));
+ unlock_pages(xch, &fmap, sizeof(fmap));
+ unlock_pages(xch, &e820, sizeof(e820));
return rc;
}
#else
domctl.cmd = XEN_DOMCTL_gettscinfo;
domctl.domain = (domid_t)domid;
set_xen_guest_handle(domctl.u.tsc_info.out_info, &info);
- if ( (rc = lock_pages(&info, sizeof(info))) != 0 )
+ if ( (rc = lock_pages(xch, &info, sizeof(info))) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
if ( rc == 0 )
*gtsc_khz = info.gtsc_khz;
*incarnation = info.incarnation;
}
- unlock_pages(&info,sizeof(info));
+ unlock_pages(xch, &info,sizeof(info));
return rc;
}
domctl.u.vcpucontext.vcpu = vcpu;
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
- if ( (rc = lock_pages(ctxt, sz)) != 0 )
+ if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
return rc;
rc = do_domctl(xch, &domctl);
- unlock_pages(ctxt, sz);
+ unlock_pages(xch, ctxt, sz);
return rc;
}
arg.domid = dom;
arg.index = param;
arg.value = value;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(handle, &arg, sizeof(arg)) != 0 )
return -1;
rc = do_xen_hypercall(handle, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(handle, &arg, sizeof(arg));
return rc;
}
hypercall.arg[1] = (unsigned long)&arg;
arg.domid = dom;
arg.index = param;
- if ( lock_pages(&arg, sizeof(arg)) != 0 )
+ if ( lock_pages(handle, &arg, sizeof(arg)) != 0 )
return -1;
rc = do_xen_hypercall(handle, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(handle, &arg, sizeof(arg));
*value = arg.value;
return rc;
}
set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
- if ( lock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
+ if ( lock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
{
PERROR("Could not lock memory for xc_get_device_group");
return -ENOMEM;
}
rc = do_domctl(xch, &domctl);
- unlock_pages(sdev_array, max_sdevs * sizeof(*sdev_array));
+ unlock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array));
*num_sdevs = domctl.u.get_device_group.num_sdevs;
return rc;
memset(ctx->p2m_batch, 0,
ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
- if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+ if ( lock_pages(xch, region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
{
PERROR("Could not lock region_mfn");
goto out;
}
- if ( lock_pages(ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+ if ( lock_pages(xch, ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
{
ERROR("Could not lock p2m_batch");
goto out;
}
}
- if ( lock_pages(&ctxt, sizeof(ctxt)) )
+ if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
{
PERROR("Unable to lock ctxt");
return 1;
memset(to_send, 0xff, BITMAP_SIZE);
- if ( lock_pages(to_send, BITMAP_SIZE) )
+ if ( lock_pages(xch, to_send, BITMAP_SIZE) )
{
PERROR("Unable to lock to_send");
return 1;
}
/* (to fix is local only) */
- if ( lock_pages(to_skip, BITMAP_SIZE) )
+ if ( lock_pages(xch, to_skip, BITMAP_SIZE) )
{
PERROR("Unable to lock to_skip");
return 1;
memset(pfn_type, 0,
ROUNDUP(MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
- if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
+ if ( lock_pages(xch, pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
{
PERROR("Unable to lock pfn_type array");
goto out;
hypercall.arg[0] = cmd;
hypercall.arg[1] = (unsigned long)arg;
- if ( lock_pages(arg, arg_size) != 0 )
+ if ( lock_pages(xch, arg, arg_size) != 0 )
{
PERROR("do_evtchn_op: arg lock failed");
goto out;
if ((ret = do_xen_hypercall(xch, &hypercall)) < 0 && !silently_fail)
ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
- unlock_pages(arg, arg_size);
+ unlock_pages(xch, arg, arg_size);
out:
return ret;
}
hypercall.op = __HYPERVISOR_xsm_op;
hypercall.arg[0] = (unsigned long)op;
- if ( lock_pages(op, sizeof(*op)) != 0 )
+ if ( lock_pages(xch, op, sizeof(*op)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
fprintf(stderr, "XSM operation failed!\n");
}
- unlock_pages(op, sizeof(*op));
+ unlock_pages(xch, op, sizeof(*op));
out:
return ret;
hypercall.arg[1] = (unsigned long)op;
hypercall.arg[2] = count;
- if ( lock_pages(op, count* op_size) != 0 )
+ if ( lock_pages(xch, op, count* op_size) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(op, count * op_size);
+ unlock_pages(xch, op, count * op_size);
out1:
return ret;
*gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );
frame_list = malloc(query.nr_frames * sizeof(unsigned long));
- if ( !frame_list || lock_pages(frame_list,
+ if ( !frame_list || lock_pages(xch, frame_list,
query.nr_frames * sizeof(unsigned long)) )
{
ERROR("Alloc/lock frame_list in xc_gnttab_map_table\n");
err:
if ( frame_list )
{
- unlock_pages(frame_list, query.nr_frames * sizeof(unsigned long));
+ unlock_pages(xch, frame_list, query.nr_frames * sizeof(unsigned long));
free(frame_list);
}
if ( pfn_list )
sysctl.u.readconsole.incremental = incremental;
}
- if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
+ if ( (ret = lock_pages(xch, buffer, nr_chars)) != 0 )
return ret;
if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
*pindex = sysctl.u.readconsole.index;
}
- unlock_pages(buffer, nr_chars);
+ unlock_pages(xch, buffer, nr_chars);
return ret;
}
set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
sysctl.u.debug_keys.nr_keys = len;
- if ( (ret = lock_pages(keys, len)) != 0 )
+ if ( (ret = lock_pages(xch, keys, len)) != 0 )
return ret;
ret = do_sysctl(xch, &sysctl);
- unlock_pages(keys, len);
+ unlock_pages(xch, keys, len);
return ret;
}
DECLARE_HYPERCALL;
mc->interface_version = XEN_MCA_INTERFACE_VERSION;
- if ( lock_pages(mc, sizeof(mc)) )
+ if ( lock_pages(xch, mc, sizeof(*mc)) )
{
PERROR("Could not lock xen_mc memory");
return -EINVAL;
hypercall.op = __HYPERVISOR_mca;
hypercall.arg[0] = (unsigned long)mc;
ret = do_xen_hypercall(xch, &hypercall);
- unlock_pages(mc, sizeof(mc));
+ unlock_pages(xch, mc, sizeof(*mc));
return ret;
}
#endif
sysctl.u.getcpuinfo.max_cpus = max_cpus;
set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);
- if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
+ if ( (rc = lock_pages(xch, info, max_cpus*sizeof(*info))) != 0 )
return rc;
rc = do_sysctl(xch, &sysctl);
- unlock_pages(info, max_cpus*sizeof(*info));
+ unlock_pages(xch, info, max_cpus*sizeof(*info));
if ( nr_cpus )
*nr_cpus = sysctl.u.getcpuinfo.nr_cpus;
struct xen_hvm_set_pci_intx_level _arg, *arg = &_arg;
int rc;
- if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+ if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&arg, sizeof(*arg));
+ hcall_buf_release(xch, (void **)&arg, sizeof(*arg));
return rc;
}
struct xen_hvm_set_isa_irq_level _arg, *arg = &_arg;
int rc;
- if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+ if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&arg, sizeof(*arg));
+ hcall_buf_release(xch, (void **)&arg, sizeof(*arg));
return rc;
}
arg.link = link;
arg.isa_irq = isa_irq;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
arg.nr = nr;
set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
arg.first_pfn = first_pfn;
arg.nr = nr;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
arg.first_pfn = first_pfn;
arg.nr = nr;
- if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 )
{
PERROR("Could not lock memory");
return rc;
rc = do_xen_hypercall(xch, &hypercall);
- unlock_pages(&arg, sizeof(arg));
+ unlock_pages(xch, &arg, sizeof(arg));
return rc;
}
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_mark_page_online\n");
return -EINVAL;
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_mark_page_offline");
return -EINVAL;
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
if ( !status || (end < start) )
return -EINVAL;
- if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
+ if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
{
ERROR("Could not lock memory for xc_query_page_offline_status\n");
return -EINVAL;
set_xen_guest_handle(sysctl.u.page_offline.status, status);
ret = xc_sysctl(xch, &sysctl);
- unlock_pages(status, sizeof(uint32_t)*(end - start + 1));
+ unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
return ret;
}
minfo->pfn_type[i] = pfn_to_mfn(i, minfo->p2m_table,
minfo->guest_width);
- if ( lock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) )
+ if ( lock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) )
{
ERROR("Unable to lock pfn_type array");
goto failed;
return 0;
unlock:
- unlock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type));
+ unlock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type));
failed:
if (minfo->pfn_type)
{
if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0)
return ret;
- if ( (ret = lock_pages(pxpt->trans_pt,
+ if ( (ret = lock_pages(xch, pxpt->trans_pt,
max_px * max_px * sizeof(uint64_t))) != 0 )
return ret;
- if ( (ret = lock_pages(pxpt->pt,
+ if ( (ret = lock_pages(xch, pxpt->pt,
max_px * sizeof(struct xc_px_val))) != 0 )
{
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
return ret;
}
ret = xc_sysctl(xch, &sysctl);
if ( ret )
{
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
- unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
return ret;
}
pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;
- unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
- unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val));
+ unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+ unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
return ret;
}
if ( (ret = xc_pm_get_max_cx(xch, cpuid, &max_cx)) )
goto unlock_0;
- if ( (ret = lock_pages(cxpt, sizeof(struct xc_cx_stat))) )
+ if ( (ret = lock_pages(xch, cxpt, sizeof(struct xc_cx_stat))) )
goto unlock_0;
- if ( (ret = lock_pages(cxpt->triggers, max_cx * sizeof(uint64_t))) )
+ if ( (ret = lock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t))) )
goto unlock_1;
- if ( (ret = lock_pages(cxpt->residencies, max_cx * sizeof(uint64_t))) )
+ if ( (ret = lock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t))) )
goto unlock_2;
sysctl.cmd = XEN_SYSCTL_get_pmstat;
cxpt->cc6 = sysctl.u.get_pmstat.u.getcx.cc6;
unlock_3:
- unlock_pages(cxpt->residencies, max_cx * sizeof(uint64_t));
+ unlock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t));
unlock_2:
- unlock_pages(cxpt->triggers, max_cx * sizeof(uint64_t));
+ unlock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t));
unlock_1:
- unlock_pages(cxpt, sizeof(struct xc_cx_stat));
+ unlock_pages(xch, cxpt, sizeof(struct xc_cx_stat));
unlock_0:
return ret;
}
(!user_para->scaling_available_governors) )
return -EINVAL;
- if ( (ret = lock_pages(user_para->affected_cpus,
+ if ( (ret = lock_pages(xch, user_para->affected_cpus,
user_para->cpu_num * sizeof(uint32_t))) )
goto unlock_1;
- if ( (ret = lock_pages(user_para->scaling_available_frequencies,
+ if ( (ret = lock_pages(xch, user_para->scaling_available_frequencies,
user_para->freq_num * sizeof(uint32_t))) )
goto unlock_2;
- if ( (ret = lock_pages(user_para->scaling_available_governors,
+ if ( (ret = lock_pages(xch, user_para->scaling_available_governors,
user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
goto unlock_3;
}
unlock_4:
- unlock_pages(user_para->scaling_available_governors,
+ unlock_pages(xch, user_para->scaling_available_governors,
user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char));
unlock_3:
- unlock_pages(user_para->scaling_available_frequencies,
+ unlock_pages(xch, user_para->scaling_available_frequencies,
user_para->freq_num * sizeof(uint32_t));
unlock_2:
- unlock_pages(user_para->affected_cpus,
+ unlock_pages(xch, user_para->affected_cpus,
user_para->cpu_num * sizeof(uint32_t));
unlock_1:
return ret;
return 0;
}
-static void xc_clean_hcall_buf(void);
+static void xc_clean_hcall_buf(xc_interface *xch);
int xc_interface_close(xc_interface *xch)
{
if (rc) PERROR("Could not close hypervisor interface");
}
- xc_clean_hcall_buf();
+ xc_clean_hcall_buf(xch);
free(xch);
return rc;
#ifdef __sun__
-int lock_pages(void *addr, size_t len) { return 0; }
-void unlock_pages(void *addr, size_t len) { }
+int lock_pages(xc_interface *xch, void *addr, size_t len) { return 0; }
+void unlock_pages(xc_interface *xch, void *addr, size_t len) { }
-int hcall_buf_prep(void **addr, size_t len) { return 0; }
-void hcall_buf_release(void **addr, size_t len) { }
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len) { return 0; }
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len) { }
-static void xc_clean_hcall_buf(void) { }
+static void xc_clean_hcall_buf(xc_interface *xch) { }
#else /* !__sun__ */
-int lock_pages(void *addr, size_t len)
+int lock_pages(xc_interface *xch, void *addr, size_t len)
{
int e;
void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
return e;
}
-void unlock_pages(void *addr, size_t len)
+void unlock_pages(xc_interface *xch, void *addr, size_t len)
{
void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
static pthread_key_t hcall_buf_pkey;
static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT;
struct hcall_buf {
+ xc_interface *xch;
void *buf;
void *oldbuf;
};
{
if ( hcall_buf->buf )
{
- unlock_pages(hcall_buf->buf, PAGE_SIZE);
+ unlock_pages(hcall_buf->xch, hcall_buf->buf, PAGE_SIZE);
free(hcall_buf->buf);
}
pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf);
}
-static void xc_clean_hcall_buf(void)
+static void xc_clean_hcall_buf(xc_interface *xch)
{
pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf);
_xc_clean_hcall_buf(pthread_getspecific(hcall_buf_pkey));
}
-int hcall_buf_prep(void **addr, size_t len)
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len)
{
struct hcall_buf *hcall_buf;
hcall_buf = calloc(1, sizeof(*hcall_buf));
if ( !hcall_buf )
goto out;
+ hcall_buf->xch = xch;
pthread_setspecific(hcall_buf_pkey, hcall_buf);
}
if ( !hcall_buf->buf )
{
hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE);
- if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) )
+ if ( !hcall_buf->buf || lock_pages(xch, hcall_buf->buf, PAGE_SIZE) )
{
free(hcall_buf->buf);
hcall_buf->buf = NULL;
}
out:
- return lock_pages(*addr, len);
+ return lock_pages(xch, *addr, len);
}
-void hcall_buf_release(void **addr, size_t len)
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len)
{
struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey);
}
else
{
- unlock_pages(*addr, len);
+ unlock_pages(xch, *addr, len);
}
}
DECLARE_HYPERCALL;
long ret = -EINVAL;
- if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&op, nr_ops*sizeof(*op)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
ret = do_xen_hypercall(xch, &hypercall);
- hcall_buf_release((void **)&op, nr_ops*sizeof(*op));
+ hcall_buf_release(xch, (void **)&op, nr_ops*sizeof(*op));
out1:
return ret;
hypercall.arg[2] = 0;
hypercall.arg[3] = mmu->subject;
- if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
+ if ( lock_pages(xch, mmu->updates, sizeof(mmu->updates)) != 0 )
{
PERROR("flush_mmu_updates: mmu updates lock_pages failed");
err = 1;
mmu->idx = 0;
- unlock_pages(mmu->updates, sizeof(mmu->updates));
+ unlock_pages(xch, mmu->updates, sizeof(mmu->updates));
out:
return err;
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
- if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
+ if ( lock_pages(xch, reservation, sizeof(*reservation)) != 0 )
{
PERROR("Could not lock");
goto out1;
}
get_xen_guest_handle(extent_start, reservation->extent_start);
if ( (extent_start != NULL) &&
- (lock_pages(extent_start,
+ (lock_pages(xch, extent_start,
reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
{
PERROR("Could not lock");
- unlock_pages(reservation, sizeof(*reservation));
+ unlock_pages(xch, reservation, sizeof(*reservation));
goto out1;
}
break;
case XENMEM_machphys_mfn_list:
- if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
+ if ( lock_pages(xch, xmml, sizeof(*xmml)) != 0 )
{
PERROR("Could not lock");
goto out1;
}
get_xen_guest_handle(extent_start, xmml->extent_start);
- if ( lock_pages(extent_start,
+ if ( lock_pages(xch, extent_start,
xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
{
PERROR("Could not lock");
- unlock_pages(xmml, sizeof(*xmml));
+ unlock_pages(xch, xmml, sizeof(*xmml));
goto out1;
}
break;
case XENMEM_add_to_physmap:
- if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
+ if ( lock_pages(xch, arg, sizeof(struct xen_add_to_physmap)) )
{
PERROR("Could not lock");
goto out1;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
- if ( lock_pages(arg, sizeof(domid_t)) )
+ if ( lock_pages(xch, arg, sizeof(domid_t)) )
{
PERROR("Could not lock");
goto out1;
break;
case XENMEM_set_pod_target:
case XENMEM_get_pod_target:
- if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
+ if ( lock_pages(xch, arg, sizeof(struct xen_pod_target)) )
{
PERROR("Could not lock");
goto out1;
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
- unlock_pages(reservation, sizeof(*reservation));
+ unlock_pages(xch, reservation, sizeof(*reservation));
get_xen_guest_handle(extent_start, reservation->extent_start);
if ( extent_start != NULL )
- unlock_pages(extent_start,
+ unlock_pages(xch, extent_start,
reservation->nr_extents * sizeof(xen_pfn_t));
break;
case XENMEM_machphys_mfn_list:
- unlock_pages(xmml, sizeof(*xmml));
+ unlock_pages(xch, xmml, sizeof(*xmml));
get_xen_guest_handle(extent_start, xmml->extent_start);
- unlock_pages(extent_start,
+ unlock_pages(xch, extent_start,
xmml->max_extents * sizeof(xen_pfn_t));
break;
case XENMEM_add_to_physmap:
- unlock_pages(arg, sizeof(struct xen_add_to_physmap));
+ unlock_pages(xch, arg, sizeof(struct xen_add_to_physmap));
break;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
- unlock_pages(arg, sizeof(domid_t));
+ unlock_pages(xch, arg, sizeof(domid_t));
break;
case XENMEM_set_pod_target:
case XENMEM_get_pod_target:
- unlock_pages(arg, sizeof(struct xen_pod_target));
+ unlock_pages(xch, arg, sizeof(struct xen_pod_target));
break;
}
memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
#endif
- if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
+ if ( lock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
{
PERROR("xc_get_pfn_list: pfn_buf lock failed");
return -1;
ret = do_domctl(xch, &domctl);
- unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));
+ unlock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf));
return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
break;
}
- if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
+ if ( (argsize != 0) && (lock_pages(xch, arg, argsize) != 0) )
{
PERROR("Could not lock memory for version hypercall");
return -ENOMEM;
rc = do_xen_version(xch, cmd, arg);
if ( argsize != 0 )
- unlock_pages(arg, argsize);
+ unlock_pages(xch, arg, argsize);
return rc;
}
void *xc_memalign(size_t alignment, size_t size);
-int lock_pages(void *addr, size_t len);
-void unlock_pages(void *addr, size_t len);
+int lock_pages(xc_interface *xch, void *addr, size_t len);
+void unlock_pages(xc_interface *xch, void *addr, size_t len);
-int hcall_buf_prep(void **addr, size_t len);
-void hcall_buf_release(void **addr, size_t len);
+int hcall_buf_prep(xc_interface *xch, void **addr, size_t len);
+void hcall_buf_release(xc_interface *xch, void **addr, size_t len);
int do_xen_hypercall(xc_interface *xch, privcmd_hypercall_t *hypercall);
DECLARE_HYPERCALL;
- if ( hcall_buf_prep(&op, len) != 0 )
+ if ( hcall_buf_prep(xch, &op, len) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
" rebuild the user-space tool set?\n");
}
- hcall_buf_release(&op, len);
+ hcall_buf_release(xch, &op, len);
out1:
return ret;
int ret = -1;
DECLARE_HYPERCALL;
- if ( hcall_buf_prep((void **)&domctl, sizeof(*domctl)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&domctl, sizeof(*domctl)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
" rebuild the user-space tool set?\n");
}
- hcall_buf_release((void **)&domctl, sizeof(*domctl));
+ hcall_buf_release(xch, (void **)&domctl, sizeof(*domctl));
out1:
return ret;
int ret = -1;
DECLARE_HYPERCALL;
- if ( hcall_buf_prep((void **)&sysctl, sizeof(*sysctl)) != 0 )
+ if ( hcall_buf_prep(xch, (void **)&sysctl, sizeof(*sysctl)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out1;
" rebuild the user-space tool set?\n");
}
- hcall_buf_release((void **)&sysctl, sizeof(*sysctl));
+ hcall_buf_release(xch, (void **)&sysctl, sizeof(*sysctl));
out1:
return ret;
goto out;
}
- if ( lock_pages(&ctxt, sizeof(ctxt)) )
+ if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
{
ERROR("Unable to lock ctxt");
goto out;
#if defined(__i386__) || defined(__x86_64__)
out:
- unlock_pages((void *)&ctxt, sizeof ctxt);
+ unlock_pages(xch, (void *)&ctxt, sizeof ctxt);
if (p2m)
munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
if (p2m_frame_list)
set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;
- if ( lock_pages(&bytemap, sizeof(bytemap)) != 0 )
+ if ( lock_pages(xch, &bytemap, sizeof(bytemap)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
ret = do_sysctl(xch, &sysctl);
- unlock_pages(&bytemap, sizeof(bytemap));
+ unlock_pages(xch, &bytemap, sizeof(bytemap));
out:
return ret;
hypercall.op = __HYPERVISOR_tmem_op;
hypercall.arg[0] = (unsigned long)op;
- if (lock_pages(op, sizeof(*op)) != 0)
+ if (lock_pages(xch, op, sizeof(*op)) != 0)
{
PERROR("Could not lock memory for Xen hypercall");
return -EFAULT;
DPRINTF("tmem operation failed -- need to"
" rebuild the user-space tool set?\n");
}
- unlock_pages(op, sizeof(*op));
+ unlock_pages(xch, op, sizeof(*op));
return ret;
}
op.u.ctrl.oid[2] = 0;
if (subop == TMEMC_LIST) {
- if ((arg1 != 0) && (lock_pages(buf, arg1) != 0))
+ if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0))
{
PERROR("Could not lock memory for Xen hypercall");
return -ENOMEM;
if (subop == TMEMC_LIST) {
if (arg1 != 0)
- unlock_pages(buf, arg1);
+ unlock_pages(xch, buf, arg1);
}
return rc;
op.u.ctrl.oid[2] = oid.oid[2];
if (subop == TMEMC_LIST) {
- if ((arg1 != 0) && (lock_pages(buf, arg1) != 0))
+ if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0))
{
PERROR("Could not lock memory for Xen hypercall");
return -ENOMEM;
if (subop == TMEMC_LIST) {
if (arg1 != 0)
- unlock_pages(buf, arg1);
+ unlock_pages(xch, buf, arg1);
}
return rc;