/*
 * Fragment (diff hunk, loop body cut off below): tear down every VCHI
 * service connection held by this instance.  Each handle is put in use
 * before closing -- presumably vchi_service_use() bumps a use count;
 * confirm against the VCHI API.  The close result is captured in
 * 'success'; how it is checked is not visible in this excerpt.
 */
/* Close all VCHI service connections */
for (i = 0; i < instance->num_connections; i++) {
int32_t success;
+
vchi_service_use(instance->vchi_handle[i]);
success = vchi_service_close(instance->vchi_handle[i]);
/*
 * vcaddr_to_pfn() - convert a VideoCore address to a kernel page-frame
 * number.
 *
 * Masks the address with 0x3FFFFFFF (dropping the top two bits --
 * presumably VC bus-alias bits; confirm against BCM2835 memory map),
 * rebases it onto mm_vc_mem_phys_addr (the ARM-visible physical base of
 * VC memory, defined elsewhere in this file), then shifts by PAGE_SHIFT
 * to yield the PFN.
 *
 * NOTE(review): the closing brace lies just outside this excerpt.
 */
static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
{
unsigned long pfn = vc_addr & 0x3FFFFFFF;
+
pfn += mm_vc_mem_phys_addr;
pfn >>= PAGE_SHIFT;
return pfn;
/*
 * Fragment (diff hunks, cut mid-body and mid-statement at the end):
 * page-by-page remap of a VideoCore resource into a user VMA, followed
 * by an optional cache flush when the resource is cached.  The actual
 * remap call and the right-hand side of the final assignment are not
 * visible in this excerpt.
 */
if (resource->map) {
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long addr;
+
/* Walk the VMA one page at a time. */
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
/* Finally, remap it */
/*
 * Same translation as vcaddr_to_pfn(): strip the top two bits of the
 * VC address, rebase onto the ARM physical base, offset by the page's
 * position within the VMA, then shift down to a PFN.
 */
unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
+
pfn += mm_vc_mem_phys_addr;
pfn += addr - vma->vm_start;
pfn >>= PAGE_SHIFT;
/* Flush if requested */
if (resource->res_cached && flush) {
dma_addr_t phys_addr = 0;
+
/* Bump the per-resource flush statistic before flushing. */
resource->res_stats[FLUSH]++;
phys_addr =
/*
 * Fragment (diff hunk): snapshot the mapped VMA's [start, end) address
 * range.  How start/end are used afterwards is not visible in this
 * excerpt.
 */
if (map->vma) {
unsigned long start;
unsigned long end;
+
start = map->vma->vm_start;
end = map->vma->vm_end;
/*
 * Fragment (diff hunks): cache maintenance for the clean/invalidate
 * ioctl path.  base/end round the user-supplied [addr, addr+size)
 * range outward to page boundaries.  cmd == 1 appears to select the
 * INVALID statistic vs FLUSH -- confirm against the vmcs_sm ioctl ABI.
 * The second hunk iterates per-operation blocks; loop bodies are not
 * visible in this excerpt.
 */
if ((resource != NULL) && resource->res_cached) {
unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE-1);
unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
+
resource->res_stats[ioparam.s[i].cmd == 1 ? INVALID:FLUSH]++;
/* L1/L2 cache flush */
for (i=0; i<ioparam.op_count; i++) {
struct vmcs_sm_ioctl_clean_invalid_block *op = block + i;
+
for (j = 0; j < op->block_count; ++j) {