struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
-	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
			 pmd_t *, unsigned int flags);
	/* ... */
};
-static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
-{
-	if (addr & ~(huge_page_mask(hstate_vma(vma))))
-		return -EINVAL;
-	return 0;
-}
-
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get away with that.
 */

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
-	.split = hugetlb_vm_op_split,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
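
For reference, huge_page_mask() returns the alignment mask for the VMA's huge page size, so addr & ~huge_page_mask(...) is non-zero exactly when the proposed split point falls inside a huge page; that is what hugetlb_vm_op_split() rejected with -EINVAL, and what the open-coded check below keeps rejecting. A minimal user-space sketch of the same arithmetic, assuming a 2 MiB huge page size (the kernel takes the real size from the VMA's hstate):

#include <stdio.h>

int main(void)
{
	/* Assumed huge page size: 2 MiB; not taken from the patch itself. */
	unsigned long huge_page_size = 2UL << 20;
	unsigned long mask = ~(huge_page_size - 1);	/* like huge_page_mask() */

	unsigned long aligned = 0x40000000UL;		/* on a 2 MiB boundary */
	unsigned long unaligned = 0x40000000UL + 4096;	/* inside a huge page */

	/* Non-zero low bits mean the split point is misaligned, which the
	 * kernel check turns into -EINVAL. */
	printf("aligned:   %#lx\n", aligned & ~mask);	/* prints 0 */
	printf("unaligned: %#lx\n", unaligned & ~mask);	/* prints 0x1000 */
	return 0;
}
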
	struct vm_area_struct *new;
	int err;

-	if (vma->vm_ops && vma->vm_ops->split) {
-		err = vma->vm_ops->split(vma, addr);
-		if (err)
-			return err;
-	}
+	if (is_vm_hugetlb_page(vma) && (addr &
+					~(huge_page_mask(hstate_vma(vma)))))
+		return -EINVAL;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
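
Either way, the user-visible behaviour is the same: a hugetlbfs VMA can only be split on a huge page boundary, so partially unmapping it at any other address fails. A rough way to observe this, assuming 2 MiB huge pages have been reserved (for example via /proc/sys/vm/nr_hugepages) so that the MAP_HUGETLB mapping succeeds:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;		/* two 2 MiB huge pages */

	/* Needs reserved huge pages; fails with ENOMEM otherwise. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}

	/* Punching a 4 KiB hole would split the VMA at an address that is
	 * not huge page aligned, so the kernel refuses the split. */
	if (munmap((char *)p + 4096, 4096) == -1)
		printf("munmap: %s (expected: Invalid argument)\n",
		       strerror(errno));

	munmap(p, len);
	return 0;
}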