CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_RESOURCES_64BIT=y
+ # CONFIG_HIGHPTE is not set
CONFIG_MTRR=y
# CONFIG_REGPARM is not set
CONFIG_SECCOMP=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_RESOURCES_64BIT=y
+ # CONFIG_HIGHPTE is not set
# CONFIG_REGPARM is not set
CONFIG_SECCOMP=y
CONFIG_HZ_100=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_RESOURCES_64BIT=y
+ # CONFIG_HIGHPTE is not set
CONFIG_MTRR=y
CONFIG_REGPARM=y
CONFIG_SECCOMP=y
page = read_cr3();
page = ((unsigned long *) __va(page))[address >> 22];
- printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
- machine_to_phys(page));
-#ifdef CONFIG_HIGHPTE
+ if (oops_may_print())
+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
/*
* We must not directly access the pte in the highpte
- * case, the page table might be allocated in highmem.
+ * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
* it's allocated already.
*/
- #ifndef CONFIG_HIGHPTE
++#ifdef CONFIG_HIGHPTE
+ if ((page >> PAGE_SHIFT) >= highstart_pfn)
+ return;
+ #endif
- if (page & 1) {
+ if ((page & 1) && oops_may_print()) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = machine_to_phys(page);
#ifdef CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+ if (pte && PageHighMem(pte)) {
+ struct mmuext_op op;
+
+ kmap_flush_unused();
+ op.cmd = MMUEXT_PIN_L1_TABLE;
+ op.arg1.mfn = pfn_to_mfn(page_to_pfn(pte));
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+ }
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ #endif
if (pte) {
SetPageForeign(pte, pte_free);
- set_page_count(pte, 1);
+ init_page_count(pte);
}
- #endif
return pte;
}
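
The set_page_count(pte, 1) -> init_page_count(pte) substitution here (and repeated in the hunks below) tracks a mainline API change: pages that are handed back to the allocator with a fresh reference count are supposed to use init_page_count(), with set_page_count() reserved for core mm code. For these callers the two are equivalent; a sketch of the relationship, assuming the include/linux/mm.h definitions of that era (editor's note, not part of this patch):

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/* (Re)initialise the refcount of a page about to be handed out or freed. */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}
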
void pte_free(struct page *pte)
{
- unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
+ unsigned long pfn = page_to_pfn(pte);
- if (!pte_write(*virt_to_ptep(va)))
- BUG_ON(HYPERVISOR_update_va_mapping(
- va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
+ if (!PageHighMem(pte)) {
+ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+ if (!pte_write(*virt_to_ptep(va)))
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ va, pfn_pte(pfn, PAGE_KERNEL), 0));
+ } else {
+ struct mmuext_op op;
+
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+ }
ClearPageForeign(pte);
- set_page_count(pte, 1);
+ init_page_count(pte);
__free_page(pte);
}
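
The two paths above are deliberately symmetric: a PTE page allocated in highmem has no kernel direct mapping that pte_free() could flip back to writable, so the patch pins it as an L1 page table with MMUEXT_PIN_L1_TABLE right after allocation and unpins it with MMUEXT_UNPIN_TABLE on free, while lowmem PTE pages keep the existing HYPERVISOR_update_va_mapping() path that restores a writable mapping before the page goes back to the allocator. A rough userspace analogue of that pairing (editor's sketch only; every name is invented and the "hypercalls" are stubs):

#include <stdio.h>
#include <stdlib.h>

enum home { LOWMEM, HIGHMEM };

struct ptepage {
	enum home home;		/* where the page lives */
	int pinned;		/* MMUEXT_PIN_L1_TABLE analogue */
	int mapping_ro;		/* direct mapping made read-only while in use */
};

static void hv_pin(struct ptepage *p)     { p->pinned = 1; }	/* stub hypercall */
static void hv_unpin(struct ptepage *p)   { p->pinned = 0; }	/* stub hypercall */
static void hv_make_rw(struct ptepage *p) { p->mapping_ro = 0; }/* update_va_mapping analogue */

static struct ptepage *pte_alloc_sketch(enum home home)
{
	struct ptepage *p = calloc(1, sizeof(*p));

	if (p) {
		p->home = home;
		if (home == HIGHMEM)
			hv_pin(p);	/* highmem: pin eagerly, no direct mapping to fix up later */
	}
	return p;
}

static void pte_free_sketch(struct ptepage *p)
{
	if (!p)
		return;
	if (p->home == HIGHMEM)
		hv_unpin(p);		/* mirrors the pin taken in the alloc path */
	else if (p->mapping_ro)
		hv_make_rw(p);		/* lowmem: restore a writable direct mapping first */
	free(p);
}

int main(void)
{
	struct ptepage *hi = pte_alloc_sketch(HIGHMEM);
	struct ptepage *lo = pte_alloc_sketch(LOWMEM);

	if (lo)
		lo->mapping_ro = 1;	/* pretend the page was used as part of a pinned page table */
	pte_free_sketch(hi);
	pte_free_sketch(lo);
	printf("ok\n");
	return 0;
}
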
mm_unpin(mm);
}
- set_page_count(pte, 1);
+ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ struct page *pte;
+
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ if (pte) {
+ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
+ }
+ return pte;
+ }
+
void pte_free(struct page *pte)
{
unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
if (!pte_write(*virt_to_ptep(va)))
BUG_ON(HYPERVISOR_update_va_mapping(
va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
- set_page_count(pte, 1);
+
+ ClearPageForeign(pte);
++ init_page_count(pte);
+
__free_page(pte);
}
#endif /* CONFIG_XEN */
static inline void pud_free(pud_t *pud)
{
- pte_t *ptep = virt_to_ptep(pud);
-
- if (!pte_write(*ptep)) {
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)pud,
- pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
- 0));
- }
- free_page((unsigned long)pud);
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ pte_free(virt_to_page(pud));
}
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+ page->index = (pgoff_t)pgd_list;
+ if (pgd_list)
+ pgd_list->private = (unsigned long)&page->index;
+ pgd_list = page;
+ page->private = (unsigned long)&pgd_list;
+ spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *next, **pprev, *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+ next = (struct page *)page->index;
+ pprev = (struct page **)page->private;
+ *pprev = next;
+ if (next)
+ next->private = (unsigned long)pprev;
+ spin_unlock(&pgd_lock);
+}
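
pgd_list_add()/pgd_list_del() above thread every pgd page onto a singly linked list through struct page, with page->index holding the next page and page->private holding a pointer to whatever link currently points at this page, so removal is O(1) and never walks the list. A minimal standalone sketch of the same pointer-to-previous-link pattern, with locking omitted and all names invented (editor's illustration, not code from the patch):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical node type standing in for struct page; "next" plays the
 * role of page->index and "pprev" the role of page->private. */
struct node {
	struct node *next;
	struct node **pprev;	/* points at whatever points at us */
};

static struct node *list_head;	/* plays the role of pgd_list */

static void node_add(struct node *n)
{
	/* Insert at the head, exactly as pgd_list_add() does. */
	n->next = list_head;
	if (list_head)
		list_head->pprev = &n->next;
	list_head = n;
	n->pprev = &list_head;
}

static void node_del(struct node *n)
{
	/* Unlink via the stored back-pointer; no walk required. */
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a, b;

	node_add(&a);
	node_add(&b);
	node_del(&a);
	assert(list_head == &b && b.next == NULL);
	node_del(&b);
	assert(list_head == NULL);
	printf("ok\n");
	return 0;
}
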
+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- /*
- * We allocate two contiguous pages for kernel and user.
- */
- unsigned boundary;
+ /*
+ * We allocate two contiguous pages for kernel and user.
+ */
+ unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
-
if (!pgd)
return NULL;
+ pgd_list_add(pgd);
/*
* Copy kernel pointers in from init.
* Could keep a freelist or slab cache of those because the kernel
--- /dev/null
- ipv6-no-autoconf.patch
+git-3566561bfadffcb5dbc85d576be80c0dbf2cccc9.patch
+linux-2.6.19-rc1-kexec-move_segment_code-i386.patch
+git-4bfaaef01a1badb9e8ffb0c0a37cd2379008d21f.patch
+linux-2.6.19-rc1-kexec-move_segment_code-x86_64.patch
+blktap-aio-16_03_06.patch
+fix-ide-cd-pio-mode.patch
+i386-mach-io-check-nmi.patch
+net-csum.patch
+net-gso-5-rcv-mss.patch
+net-gso-6-linear-segmentation.patch
+pmd-shared.patch
+rename-TSS_sysenter_esp0-SYSENTER_stack_esp0.patch
+xen-hotplug.patch
+xenoprof-generic.patch
+x86-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
+x86_64-put-note-sections-into-a-pt_note-segment-in-vmlinux.patch
+git-dbaab49f92ff6ae6255762a948375e4036cbdbd2.patch
+x86-elfnote-as-preprocessor-macro.patch
+fixaddr-top.patch