struct list_head map_list; /* Maps associated with a resource. */
struct SM_PRIV_DATA_T *private;
+ bool map; /* whether to map pages up front */
};
/* Private file data associated with each opened device.
vcsm_vma_open(vma);
resource->res_stats[MAP]++;
vmcs_sm_release_resource(resource, 0);
+
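+	/* Optionally populate the whole mapping now; by default pages are
+	 * inserted lazily by the fault handler.
+	 */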
+	if (resource->map) {
+		/* vma->vm_pgoff holds the fake (GUID) offset, so derive
+		 * each PFN from the resource base instead.
+		 */
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			/* VC bus address -> ARM physical page frame:
+			 * strip the bus alias bits and rebase onto the
+			 * VC memory window.
+			 */
+			unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
+			pfn += mm_vc_mem_phys_addr;
+			pfn += addr - vma->vm_start;
+			pfn >>= PAGE_SHIFT;
+
+			ret = vm_insert_pfn(vma, addr, pfn);
+			if (ret)
+				break;	/* remaining pages fault in lazily */
+		}
+	}
+
return 0;
error:
struct SM_RESOURCE_T *resource;
VC_SM_ALLOC_T alloc = { 0 };
VC_SM_ALLOC_RESULT_T result = { 0 };
+ enum vmcs_sm_cache_e cached = ioparam->cached;
+ bool map = false;
+
+	/* Bit 0x80 of 'cached' requests that the buffer be mapped up front
+	 * rather than lazily on first access.
+	 */
+	if (cached & 0x80) {
+		map = true;
+		cached &= ~0x80;
+	}
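+	/*
+	 * For example, passing (VMCS_SM_CACHE_NONE | 0x80) in
+	 * ioparam->cached asks for an uncached buffer mapped up front.
+	 */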
/* Setup our allocation parameters */
- alloc.type = ((ioparam->cached == VMCS_SM_CACHE_VC)
- || (ioparam->cached ==
+ alloc.type = ((cached == VMCS_SM_CACHE_VC)
+ || (cached ==
VMCS_SM_CACHE_BOTH)) ? VC_SM_ALLOC_CACHED :
VC_SM_ALLOC_NON_CACHED;
alloc.base_unit = ioparam->size;
resource->res_handle = result.res_handle;
resource->res_base_mem = result.res_mem;
resource->res_size = alloc.base_unit * alloc.num_unit;
- resource->res_cached = ioparam->cached;
+ resource->res_cached = cached;
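+	/* Remembered so the mmap handler knows to insert all PFNs up front. */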
+ resource->map = map;
/* Kernel/user GUID. This global identifier is used for mmap'ing the
 * allocated region from user space; it is passed as the mmap'ing